code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
import os
import re
import threading
from PySide import QtCore
from PySide import QtGui
from tadek.core import log
import utils
class ProgressDialog(QtGui.QProgressDialog):
    '''
    A window-modal progress dialog that can track several concurrent
    "busy" messages at once, always displaying the one whose timeout is
    smallest (messages without a timeout are shown last).
    '''
    def __init__(self, parent=None):
        QtGui.QProgressDialog.__init__(self, parent=parent)
        self.setWindowModality(QtCore.Qt.WindowModal)
        # An empty (0, 0) range turns the bar into a busy indicator.
        self.setRange(0, 0)
        self.setCancelButtonText(None)
        # Maps message id -> (title, message, timeout, timer)
        self._dict = {}
        self._timeouts = {}
        # Last id handed out by add(); -1 means none yet.
        self._id = -1
        # Id of the message currently displayed.
        self._current = -1
        self._lock = threading.RLock()

    # Private methods:
    def _update(self):
        '''
        Displays the message with least timeout value, or closes the
        dialog when no messages remain.
        '''
        def compare(a, b):
            # Combined with reverse=True below, this orders ids so that
            # the smallest finite timeout comes first and None (no
            # timeout) comes last.
            if a == b or (a is None and b is None):
                return 0
            elif a is None:
                return -1
            elif b is None:
                return 1
            elif a > b:
                return -1
            elif a < b:
                return 1
        ids = sorted(self._dict, cmp=compare,
                     key=lambda k: self._dict[k][2], reverse=True)
        if not ids:
            self.close()
            return
        elif ids[0] != self._current:
            title, message = self._dict[ids[0]][:2]
            self.setWindowTitle(title)
            self.setLabelText(message)
            self._current = ids[0]
        if not self.isVisible():
            self.show()

    # Slots:
    #@QtCore.Slot()
    def _handleTimeout(self):
        '''
        Removes the message identified by ID of the sender timer and
        warns the user that the operation timed out.
        '''
        id = self.sender()._id
        # The message might have been removed just before the queued
        # timeout event was delivered -- ignore stale notifications
        # instead of raising KeyError.
        if id not in self._dict:
            return
        message = self._dict[id][1]
        self.remove(id)
        runWarning("Timeout reached for operation:\n%s" % message)

    # Public methods:
    def add(self, message, title, timeout=None):
        '''
        Adds a message and returns an ID. If timeout is given (in
        milliseconds, see QTimer.setInterval), the message is removed
        automatically when it elapses.
        '''
        self._lock.acquire()
        try:
            id = self._id + 1
            timer = None
            if timeout is not None:
                timer = QtCore.QTimer(self)
                timer.setSingleShot(True)
                timer.setInterval(timeout)
                # Remember the owning message so that _handleTimeout()
                # knows which entry to drop.
                timer._id = id
                timer.timeout.connect(self._handleTimeout)
                timer.start()
            self._dict[id] = (title, message, timeout, timer)
            self._id = id
            self._update()
            return id
        finally:
            self._lock.release()

    def remove(self, id):
        '''
        Removes a message of given ID, stopping its timeout timer.
        '''
        self._lock.acquire()
        try:
            if id not in self._dict:
                return
            timer = self._dict.pop(id)[3]
            # BUG FIX: isActive is a method; the original tested the
            # bound method object itself, which is always truthy.
            if timer and timer.isActive():
                timer.stop()
            self._update()
        finally:
            self._lock.release()

    def closeEvent(self, event):
        '''
        Prevents the dialog from closing unless there are no more messages.
        '''
        if self._dict:
            event.ignore()
# Lazily created shared instance of ProgressDialog.
_progress = None

def runProgress(message, title="Wait", timeout=None):
    '''
    Enqueues a message to the progress dialog.

    :param message: Message to be displayed inside the dialog
    :type message: string
    :param title: Title of the dialog
    :type title: string
    :param timeout: Timeout of the dialog in milliseconds (the value is
        passed to QTimer.setInterval); the default value is infinity
    :type timeout: integer
    :return: Unique identifier of the message
    :rtype: integer
    '''
    global _progress
    if _progress is None:
        _progress = ProgressDialog(utils.window())
    return _progress.add(message, title, timeout)
def closeProgress(id):
    '''
    Removes a message from the progress dialog.

    :param id: Identifier of a message to remove
    :type id: integer
    '''
    # Guard against closing a message before any progress dialog was
    # ever created; the original raised AttributeError on None here.
    if _progress is not None:
        _progress.remove(id)
def runError(message, title="Error"):
    '''
    Displays an error message box with the given title and message,
    logging the message first.

    :param message: Message to be displayed inside the dialog
    :type message: string
    :param title: Title of the dialog, "Error" by default
    :type title: string
    '''
    log.error(message)
    window = utils.window()
    return QtGui.QMessageBox.critical(window, title, message)
def runWarning(message, title="Warning"):
    '''
    Displays a warning message box with the given title and message,
    logging the message first.

    :param message: Message to be displayed inside the dialog
    :type message: string
    :param title: Title of the dialog, "Warning" by default
    :type title: string
    '''
    log.warning(message)
    window = utils.window()
    return QtGui.QMessageBox.warning(window, title, message)
def runInformation(message, title="Information"):
    '''
    Displays an information message box with the given title and
    message, logging the message first.

    :param message: Message to be displayed inside the dialog
    :type message: string
    :param title: Title of the dialog, "Information" by default
    :type title: string
    '''
    log.info(message)
    window = utils.window()
    return QtGui.QMessageBox.information(window, title, message)
def runQuestion(message, title="Question"):
    '''
    Runs a question message box with the given title and message.

    :param message: Message to be displayed inside the dialog
    :type message: string
    :param title: Title of the dialog, if not provided, "Question" is set
    :type title: string
    :return: Answer to a question (True for Yes, False for No)
    :rtype: boolean
    '''
    # The original built the button mask with
    # reduce(StandardButton.__ror__, ...), which is both obscure and
    # relies on the Python 2 builtin reduce; OR-ing the flags directly
    # is equivalent and idiomatic.
    buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No
    answer = QtGui.QMessageBox.question(utils.window(), title,
                                        message, buttons)
    ret = (answer == QtGui.QMessageBox.Yes)
    log.info("%s: %s" % (message, ret))
    return ret
def runSaveFile(filters, name=""):
    '''
    Runs a save file dialog and returns a path or None.

    :param filters: Filters in QFileDialog format, e.g. "XML (*.xml)"
    :type filters: string
    :param name: Default file name
    :type name: string
    :return: Path to a file or None if the dialog was cancelled
    :rtype: string
    '''
    dialog = QtGui.QFileDialog(utils.window())
    dialog.setAcceptMode(QtGui.QFileDialog.AcceptMode.AcceptSave)
    dialog.setFileMode(QtGui.QFileDialog.FileMode.AnyFile)
    # Overwrite confirmation is done manually below, after the filter
    # extension has been appended to the chosen path.
    dialog.setOption(QtGui.QFileDialog.DontConfirmOverwrite)
    # Map each filter string to its bare extension,
    # e.g. "XML (*.xml)" -> "xml".
    extensions = {}
    for fltr in filters.split(";;"):
        # BUG FIX: use a raw string for the regex; "\(" and "\*" are
        # invalid string escapes and only work by accident.
        m = re.search(r"\(\*(.*)\)", fltr)
        if not m:
            continue
        extensions[fltr] = m.group(1).strip(".")
    dialog.setFilter(filters)
    dialog.selectFile(name)
    if not dialog.exec_():
        return None
    path = dialog.selectedFiles()[0]
    ext = extensions[dialog.selectedFilter()]
    # Append the selected filter's extension unless the path already
    # carries it (or the filter accepts any extension).
    if ext not in ("", "*") and os.path.splitext(path)[1].strip(".") != ext:
        path = "%s.%s" % (path, ext)
    if os.path.exists(path):
        if not runQuestion("'%s' already exists.\nDo you want to replace it?"
                            % os.path.split(path)[1]):
            return None
    dialog.setParent(None)
    return path
#!/usr/bin/env python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time
from upm import pyupm_md as upmmd
def main():
    # Bus and address constants provided by the UPM bindings.
    bus = upmmd.MD_I2C_BUS
    address = upmmd.MD_DEFAULT_I2C_ADDR

    # Instantiate an I2C Motor Driver on I2C bus 0
    driver = upmmd.MD(bus, address)

    # Drive both motors clockwise at half speed (127 of 255).
    print("Spin M1 and M2 at half speed for 3 seconds")
    driver.setMotorDirections(upmmd.MD_DIR_CW, upmmd.MD_DIR_CW)
    driver.setMotorSpeeds(127, 127)
    time.sleep(3)

    # Reverse both motors (counter clockwise) for the same period.
    print("Reversing M1 and M2 for 3 seconds")
    driver.setMotorDirections(upmmd.MD_DIR_CCW, upmmd.MD_DIR_CCW)
    time.sleep(3)

    print("Stopping motors")
    driver.setMotorSpeeds(0, 0)

if __name__ == '__main__':
    main()
# self file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class SwedishStemmer(BaseStemmer):
    '''
    This class was automatically generated by a Snowball to Python
    interpreter. It implements the Swedish stemming algorithm defined
    by a snowball script; the numbered comments refer to lines of that
    script. The generated control flow is preserved verbatim except for
    the broken super() call fixed in copy_from().
    '''
    serialVersionUID = 1

    # Suffix tables: Among(form, index of longest proper substring
    # also in the table, result code).
    a_0 = [
        Among(u"a", -1, 1),
        Among(u"arna", 0, 1),
        Among(u"erna", 0, 1),
        Among(u"heterna", 2, 1),
        Among(u"orna", 0, 1),
        Among(u"ad", -1, 1),
        Among(u"e", -1, 1),
        Among(u"ade", 6, 1),
        Among(u"ande", 6, 1),
        Among(u"arne", 6, 1),
        Among(u"are", 6, 1),
        Among(u"aste", 6, 1),
        Among(u"en", -1, 1),
        Among(u"anden", 12, 1),
        Among(u"aren", 12, 1),
        Among(u"heten", 12, 1),
        Among(u"ern", -1, 1),
        Among(u"ar", -1, 1),
        Among(u"er", -1, 1),
        Among(u"heter", 18, 1),
        Among(u"or", -1, 1),
        Among(u"s", -1, 2),
        Among(u"as", 21, 1),
        Among(u"arnas", 22, 1),
        Among(u"ernas", 22, 1),
        Among(u"ornas", 22, 1),
        Among(u"es", 21, 1),
        Among(u"ades", 26, 1),
        Among(u"andes", 26, 1),
        Among(u"ens", 21, 1),
        Among(u"arens", 29, 1),
        Among(u"hetens", 29, 1),
        Among(u"erns", 21, 1),
        Among(u"at", -1, 1),
        Among(u"andet", -1, 1),
        Among(u"het", -1, 1),
        Among(u"ast", -1, 1)
    ]

    a_1 = [
        Among(u"dd", -1, -1),
        Among(u"gd", -1, -1),
        Among(u"nn", -1, -1),
        Among(u"dt", -1, -1),
        Among(u"gt", -1, -1),
        Among(u"kt", -1, -1),
        Among(u"tt", -1, -1)
    ]

    a_2 = [
        Among(u"ig", -1, 1),
        Among(u"lig", 0, 1),
        Among(u"els", -1, 1),
        Among(u"fullt", -1, 3),
        Among(u"l\u00F6st", -1, 2)
    ]

    # Character group bitmaps used by in_grouping/out_grouping.
    g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 0, 32]
    g_s_ending = [119, 127, 149]

    # Region marks computed by r_mark_regions().
    I_x = 0
    I_p1 = 0

    def copy_from(self, other):
        """Copy the region marks from another stemmer instance."""
        self.I_x = other.I_x
        self.I_p1 = other.I_p1
        # BUG FIX: the generated code read ``super.copy_from(other)``,
        # calling the method on the builtin ``super`` type itself,
        # which raises TypeError at runtime.
        super(SwedishStemmer, self).copy_from(other)

    def r_mark_regions(self):
        """Compute the p1 region boundary used by the suffix routines."""
        # (, line 26
        self.I_p1 = self.limit
        # test, line 29
        v_1 = self.cursor
        # (, line 29
        # hop, line 29
        c = self.cursor + 3
        if 0 > c or c > self.limit:
            return False
        self.cursor = c
        # setmark x, line 29
        self.I_x = self.cursor
        self.cursor = v_1
        # goto, line 30
        try:
            while True:
                v_2 = self.cursor
                try:
                    if not self.in_grouping(SwedishStemmer.g_v, 97, 246):
                        raise lab1()
                    self.cursor = v_2
                    raise lab0()
                except lab1: pass
                self.cursor = v_2
                if self.cursor >= self.limit:
                    return False
                self.cursor += 1
        except lab0: pass
        # gopast, line 30
        try:
            while True:
                try:
                    if not self.out_grouping(SwedishStemmer.g_v, 97, 246):
                        raise lab3()
                    raise lab2()
                except lab3: pass
                if self.cursor >= self.limit:
                    return False
                self.cursor += 1
        except lab2: pass
        # setmark p1, line 30
        self.I_p1 = self.cursor
        # try, line 31
        try:
            # (, line 31
            if not (self.I_p1 < self.I_x):
                raise lab4()
            self.I_p1 = self.I_x
        except lab4: pass
        return True

    def r_main_suffix(self):
        """Remove the main inflectional suffixes (table a_0)."""
        # (, line 36
        # setlimit, line 37
        v_1 = self.limit - self.cursor
        # tomark, line 37
        if self.cursor < self.I_p1:
            return False
        self.cursor = self.I_p1
        v_2 = self.limit_backward
        self.limit_backward = self.cursor
        self.cursor = self.limit - v_1
        # (, line 37
        # [, line 37
        self.ket = self.cursor
        # substring, line 37
        among_var = self.find_among_b(SwedishStemmer.a_0, 37)
        if among_var == 0:
            self.limit_backward = v_2
            return False
        # ], line 37
        self.bra = self.cursor
        self.limit_backward = v_2
        if among_var == 0:
            return False
        elif among_var == 1:
            # (, line 44
            # delete, line 44
            if not self.slice_del():
                return False
        elif among_var == 2:
            # (, line 46
            if not self.in_grouping_b(SwedishStemmer.g_s_ending, 98, 121):
                return False
            # delete, line 46
            if not self.slice_del():
                return False
        return True

    def r_consonant_pair(self):
        """Undouble a final consonant pair (table a_1)."""
        # setlimit, line 50
        v_1 = self.limit - self.cursor
        # tomark, line 50
        if self.cursor < self.I_p1:
            return False
        self.cursor = self.I_p1
        v_2 = self.limit_backward
        self.limit_backward = self.cursor
        self.cursor = self.limit - v_1
        # (, line 50
        # and, line 52
        v_3 = self.limit - self.cursor
        # among, line 51
        if self.find_among_b(SwedishStemmer.a_1, 7) == 0:
            self.limit_backward = v_2
            return False
        self.cursor = self.limit - v_3
        # (, line 52
        # [, line 52
        self.ket = self.cursor
        # next, line 52
        if self.cursor <= self.limit_backward:
            self.limit_backward = v_2
            return False
        self.cursor -= 1
        # ], line 52
        self.bra = self.cursor
        # delete, line 52
        if not self.slice_del():
            return False
        self.limit_backward = v_2
        return True

    def r_other_suffix(self):
        """Handle derivational suffixes (table a_2)."""
        # setlimit, line 55
        v_1 = self.limit - self.cursor
        # tomark, line 55
        if self.cursor < self.I_p1:
            return False
        self.cursor = self.I_p1
        v_2 = self.limit_backward
        self.limit_backward = self.cursor
        self.cursor = self.limit - v_1
        # (, line 55
        # [, line 56
        self.ket = self.cursor
        # substring, line 56
        among_var = self.find_among_b(SwedishStemmer.a_2, 5)
        if among_var == 0:
            self.limit_backward = v_2
            return False
        # ], line 56
        self.bra = self.cursor
        if among_var == 0:
            self.limit_backward = v_2
            return False
        elif among_var == 1:
            # (, line 57
            # delete, line 57
            if not self.slice_del():
                return False
        elif among_var == 2:
            # (, line 58
            # <-, line 58
            if not self.slice_from(u"l\u00F6s"):
                return False
        elif among_var == 3:
            # (, line 59
            # <-, line 59
            if not self.slice_from(u"full"):
                return False
        self.limit_backward = v_2
        return True

    def _stem(self):
        """Run the full stemming pipeline on the current word buffer."""
        # (, line 64
        # do, line 66
        v_1 = self.cursor
        try:
            # call mark_regions, line 66
            if not self.r_mark_regions():
                raise lab0()
        except lab0: pass
        self.cursor = v_1
        # backwards, line 67
        self.limit_backward = self.cursor
        self.cursor = self.limit
        # (, line 67
        # do, line 68
        v_2 = self.limit - self.cursor
        try:
            # call main_suffix, line 68
            if not self.r_main_suffix():
                raise lab1()
        except lab1: pass
        self.cursor = self.limit - v_2
        # do, line 69
        v_3 = self.limit - self.cursor
        try:
            # call consonant_pair, line 69
            if not self.r_consonant_pair():
                raise lab2()
        except lab2: pass
        self.cursor = self.limit - v_3
        # do, line 70
        v_4 = self.limit - self.cursor
        try:
            # call other_suffix, line 70
            if not self.r_other_suffix():
                raise lab3()
        except lab3: pass
        self.cursor = self.limit - v_4
        self.cursor = self.limit_backward
        return True

    def equals(self, o):
        """Java-style equality: any two SwedishStemmer instances are equal."""
        return isinstance(o, SwedishStemmer)

    def hashCode(self):
        """Java-style hash, consistent with equals()."""
        return hash("SwedishStemmer")
# Control-flow "labels" used by the generated stemmer code above:
# raising one of these exceptions emulates Snowball's goto-style jumps
# out of a block (each ``try``/``except labN: pass`` pair is one label).
# They derive from BaseException so ordinary ``except Exception``
# handlers cannot accidentally swallow them.
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.
    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components
        Gaussians. Each row corresponds to a single mean vector.
    covars : array_like
        List of n_components covariance parameters for each Gaussian.
        The shape depends on `covariance_type`:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    covariance_type : string
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian
        distributions.
    """
    # Dispatch to the specialised implementation for this covariance
    # type; an unknown type raises KeyError, as before.
    density_func = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }[covariance_type]
    return density_func(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Generate random samples from a Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like
        Covariance of the distribution. The shape depends on
        `covariance_type`:
            scalar                   if 'spherical',
            (n_features,)            if 'diag',
            (n_features, n_features) if 'tied' or 'full'
    covariance_type : string, optional
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample (1-D of shape (n_features,) when
        n_samples == 1).
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    # Draw standard-normal noise, then colour it with the covariance.
    noise = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        noise.shape = (n_dim,)

    if covariance_type == 'spherical':
        noise *= np.sqrt(covar)
    elif covariance_type == 'diag':
        noise = np.dot(np.diag(np.sqrt(covar)), noise)
    else:
        # 'tied' or 'full': scale along the covariance eigenbasis.
        s, U = linalg.eigh(covar)
        s.clip(0, out=s)  # get rid of tiny negatives
        np.sqrt(s, out=s)
        U *= s
        noise = np.dot(U, noise)

    return (noise.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. the best results is kept
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc',
                 verbose=0):
        # 'thresh' was replaced by 'tol'; keep accepting it with a
        # deprecation warning for backward compatibility.
        if thresh is not None:
            warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
                          " and will be removed in 0.18.",
                          DeprecationWarning)
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.tol = tol
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.verbose = verbose

        # NOTE: validation happens after the attribute assignments
        # above, so a partially-initialised object exists if it raises.
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)

        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')

        # Start from uniform mixing weights.
        self.weights_ = np.ones(self.n_components) / self.n_components

        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
    def _set_covars(self, covars):
        """Provide values for covariance.

        The array is validated against ``covariance_type`` and
        ``n_components`` before being stored, so an invalid value
        leaves ``covars_`` untouched.
        """
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        check_is_fitted(self, 'means_')

        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # Cumulative mixing weights act as the CDF used to draw the
        # component for each sample.
        weight_cdf = np.cumsum(self.weights_)

        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                # Select the covariance representation expected by
                # sample_gaussian for this covariance_type.
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
    def _fit(self, X, y=None, do_prediction=False):
        """Estimate model parameters with the EM algorithm.

        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        # initialization step
        X = check_array(X, dtype=np.float64)
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))

        # Best mean log-likelihood seen over all restarts.
        max_log_prob = -np.infty

        if self.verbose > 0:
            print('Expectation-maximization algorithm started.')

        # Run EM n_init times from fresh initializations; keep the best.
        for init in range(self.n_init):
            if self.verbose > 0:
                print('Initialization ' + str(init + 1))
                start_init_time = time()

            # Means from k-means, unless already set and 'm' not requested.
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_
                if self.verbose > 1:
                    print('\tMeans have been initialized.')

            # Uniform mixing weights.
            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)
                if self.verbose > 1:
                    print('\tWeights have been initialized.')

            # Covariances from the data covariance, floored by min_covar.
            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    # np.cov of 1-D data is a 0-d array; promote to 2-D.
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)
                if self.verbose > 1:
                    print('\tCovariance matrices have been initialized.')

            # EM algorithms
            current_log_likelihood = None
            # reset self.converged_ to False
            self.converged_ = False

            # this line should be removed when 'thresh' is removed in v0.18
            # (deprecated 'thresh' was a total, not per-sample, threshold)
            tol = (self.tol if self.thresh is None
                   else self.thresh / float(X.shape[0]))

            for i in range(self.n_iter):
                if self.verbose > 0:
                    print('\tEM iteration ' + str(i + 1))
                    start_iter_time = time()

                prev_log_likelihood = current_log_likelihood
                # Expectation step
                log_likelihoods, responsibilities = self.score_samples(X)
                current_log_likelihood = log_likelihoods.mean()

                # Check for convergence.
                # (should compare to self.tol when deprecated 'thresh' is
                # removed in v0.18)
                if prev_log_likelihood is not None:
                    change = abs(current_log_likelihood - prev_log_likelihood)
                    if self.verbose > 1:
                        print('\t\tChange: ' + str(change))
                    if change < tol:
                        self.converged_ = True
                        if self.verbose > 0:
                            print('\t\tEM algorithm converged.')
                        break

                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)
                if self.verbose > 1:
                    print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
                        time() - start_iter_time))

            # if the results are better, keep it
            if self.n_iter:
                if current_log_likelihood > max_log_prob:
                    max_log_prob = current_log_likelihood
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
                    if self.verbose > 1:
                        print('\tBetter parameters were found.')

            if self.verbose > 1:
                print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
                    time() - start_init_time))

        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")

        # Restore the parameters of the best restart.
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        else:  # self.n_iter == 0 occurs when using GMM within HMM
            # Need to make sure that there are responsibilities to output
            # Output zeros because it was just a quick initialization
            responsibilities = np.zeros((X.shape[0], self.n_components))

        return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
    """Compute Gaussian log-density at X for a diagonal model"""
    n_samples, n_dim = X.shape
    # Expand ||x - mu||^2 / sigma^2 into three dot products so the whole
    # (n_samples, n_components) matrix is computed without a Python loop.
    normalization = n_dim * np.log(2 * np.pi)
    precisions = 1.0 / covars
    lpr = -0.5 * (normalization
                  + np.sum(np.log(covars), 1)
                  + np.sum((means ** 2) * precisions, 1)
                  - 2 * np.dot(X, (means * precisions).T)
                  + np.dot(X ** 2, precisions.T))
    return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model.

    `covars` holds one variance per component (possibly 1-D); it is
    expanded to a per-dimension diagonal representation and delegated
    to the diagonal implementation.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        cv = cv[:, np.newaxis]
    # Bug fix: inspect the (possibly reshaped) working copy `cv`, not the
    # original `covars` -- for 1-D input, `covars.shape[1]` raised
    # IndexError before the tiling could happen.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model"""
    # Replicate the single shared covariance once per component, then
    # reuse the full-covariance implementation.
    n_components = means.shape[0]
    stacked_cv = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, stacked_cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    n_mix = len(means)
    log_prob = np.empty((n_samples, n_mix))
    for comp, (mu, cv) in enumerate(zip(means, covars)):
        # Factor the covariance; on failure, regularize the diagonal and
        # retry once before giving up (the component may have collapsed
        # onto too few observations).
        try:
            chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            try:
                chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                       lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")
        log_det = 2 * np.sum(np.log(np.diagonal(chol)))
        # Solve L z = (x - mu)^T so that ||z||^2 = (x-mu)^T C^-1 (x-mu).
        solved = linalg.solve_triangular(chol, (X - mu).T, lower=True).T
        log_prob[:, comp] = -.5 * (np.sum(solved ** 2, axis=1) +
                                   n_dim * np.log(2 * np.pi) + log_det)
    return log_prob
def _validate_covars(covars, covariance_type, n_components):
    """Do basic checks on matrix covariance sizes and values
    """
    from scipy import linalg

    if covariance_type == 'spherical':
        # One variance per component.
        if len(covars) != n_components:
            raise ValueError("'spherical' covars have length n_components")
        if np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif covariance_type == 'tied':
        # A single shared (n_dim, n_dim) SPD matrix.
        if covars.shape[0] != covars.shape[1]:
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        if (not np.allclose(covars, covars.T)
                or np.any(linalg.eigvalsh(covars) <= 0)):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif covariance_type == 'diag':
        # One variance per component per dimension.
        if len(covars.shape) != 2:
            raise ValueError("'diag' covars must have shape "
                             "(n_components, n_dim)")
        if np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif covariance_type == 'full':
        # One SPD matrix per component.
        if len(covars.shape) != 3:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        if covars.shape[1] != covars.shape[2]:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        for idx, component_cv in enumerate(covars):
            if (not np.allclose(component_cv, component_cv.T)
                    or np.any(linalg.eigvalsh(component_cv) <= 0)):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % idx)
    else:
        raise ValueError("covariance_type must be one of " +
                         "'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template"""
    # Early-return per structure instead of assigning to a shared local.
    if covariance_type == 'spherical':
        # One scalar (the template's mean), broadcast over all dimensions
        # and replicated per component.
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        # Shared matrix: return the template itself.
        return tied_cv
    if covariance_type == 'diag':
        # Per-component copy of the template's diagonal.
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        # Per-component copy of the whole template matrix.
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for diagonal cases"""
    # Per-component E[x^2], E[mu]^2 and cross term, combined via the
    # identity Var(x) = E[x^2] - 2*E[x]*mu + mu^2.
    avg_sq_X = np.dot(responsibilities.T, X * X) * norm
    mean_sq = gmm.means_ ** 2
    cross_term = gmm.means_ * weighted_X_sum * norm
    return avg_sq_X - 2 * cross_term + mean_sq + min_covar
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    # Start from the diagonal update, then collapse each component's
    # per-dimension variances to their mean, replicated across dimensions.
    diag_cv = _covar_mstep_diag(*args)
    per_component_mean = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component_mean, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for comp in range(gmm.n_components):
        post = responsibilities[:, comp]
        centered = X - gmm.means_[comp]
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * X.T are not important
            avg_cv = np.dot(post * centered.T, centered) / (post.sum()
                                                            + 10 * EPS)
        # Regularize the diagonal to keep the matrix positive-definite.
        cv[comp] = avg_cv + min_covar * np.eye(n_features)
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for tied (shared) covariance."""
    # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    second_moment = np.dot(X.T, X)
    mean_outer = np.dot(gmm.means_.T, weighted_X_sum)
    pooled = (second_moment - mean_outer) / X.shape[0]
    # Regularize only the diagonal entries.
    pooled[np.diag_indices_from(pooled)] += min_covar
    return pooled
# Dispatch table mapping each supported covariance structure to its
# covariance M-step implementation (looked up by GMM._do_mstep).
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
# coding=utf-8
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
from requests.compat import urlencode, urljoin
from sickbeard import classes, logger, tvcache
from sickrage.helper.exceptions import AuthException
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
try:
import json
except ImportError:
import simplejson as json
class HDBitsProvider(TorrentProvider):
    """Torrent provider for the private tracker HDBits.

    Talks to the HDBits JSON API (username + passkey authentication)
    rather than scraping HTML pages.
    """

    def __init__(self):
        TorrentProvider.__init__(self, "HDBits")
        # Credentials are filled in later from the user's configuration.
        self.username = None
        self.passkey = None
        self.cache = HDBitsCache(self, min_time=15)  # only poll HDBits every 15 minutes max
        self.url = 'https://hdbits.org'
        # Search and RSS share the same API endpoint; only the POST body differs.
        self.urls = {
            'search': urljoin(self.url, '/api/torrents'),
            'rss': urljoin(self.url, '/api/torrents'),
            'download': urljoin(self.url, '/download.php')
        }

    def _check_auth(self):
        """Raise AuthException unless both username and passkey are configured."""
        if not self.username or not self.passkey:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
        return True

    @staticmethod
    def _check_auth_from_data(parsed_json):
        """ Check that we are authenticated. """
        # Status 5 in the API response indicates bad credentials.
        if 'status' in parsed_json and 'message' in parsed_json and parsed_json.get('status') == 5:
            logger.log("Invalid username or password. Check your settings", logger.WARNING)
        # NOTE(review): returns True even after logging the bad-credentials
        # warning above -- confirm whether callers rely on that behavior.
        return True

    def _get_season_search_strings(self, ep_obj):
        """Return a one-element list with the JSON body for a season search."""
        season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj)]
        return season_search_string

    def _get_episode_search_strings(self, ep_obj, add_string=''):
        """Return a one-element list with the JSON body for an episode search."""
        episode_search_string = [self._make_post_data_JSON(show=ep_obj.show, episode=ep_obj)]
        return episode_search_string

    def _get_title_and_url(self, item):
        """Return (release title, download URL) for one API result item."""
        title = item.get('name', '').replace(' ', '.')
        # The download link needs the torrent id plus the passkey for auth.
        url = self.urls['download'] + '?' + urlencode({'id': item['id'], 'passkey': self.passkey})
        return title, url

    def search(self, search_params, age=0, ep_obj=None):
        """POST search_params (a JSON string) to the API; return raw result items."""
        # FIXME
        results = []
        # NOTE(review): search_params is the str produced by
        # _make_post_data_JSON; .decode('utf-8') assumes Python 2 byte
        # strings -- confirm this still works under the py2/py3 compat layer.
        logger.log("Search string: {0}".format
                   (search_params.decode('utf-8')), logger.DEBUG)
        self._check_auth()
        parsed_json = self.get_url(self.urls['search'], post_data=search_params, returns='json')
        if not parsed_json:
            return []
        if self._check_auth_from_data(parsed_json):
            if parsed_json and 'data' in parsed_json:
                items = parsed_json['data']
            else:
                logger.log("Resulting JSON from provider isn't correct, not parsing it", logger.ERROR)
                items = []
            for item in items:
                results.append(item)
        # FIXME SORTING
        return results

    def find_propers(self, search_date=None):
        """Search for PROPER/REPACK releases added after search_date."""
        results = []
        search_terms = [' proper ', ' repack ']
        for term in search_terms:
            for item in self.search(self._make_post_data_JSON(search_term=term)):
                if item['utadded']:
                    try:
                        # 'utadded' is the unix timestamp of when the torrent was added.
                        result_date = datetime.datetime.fromtimestamp(int(item['utadded']))
                    except Exception:
                        result_date = None
                    if result_date and (not search_date or result_date > search_date):
                        title, url = self._get_title_and_url(item)
                        results.append(classes.Proper(title, url, result_date, self.show))
        return results

    def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None):
        """Serialize an HDBits API request body as a JSON string.

        Typically exactly one of episode/season/search_term is supplied;
        show qualifies the episode and season lookups. With no arguments,
        produces the body used for the RSS/recent-torrents poll.
        """
        post_data = {
            'username': self.username,
            'passkey': self.passkey,
            'category': [2],
            # TV Category
        }
        if episode:
            if show.air_by_date:
                # Air-by-date shows are matched on 'YYYY|MM|DD'.
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'episode': str(episode.airdate).replace('-', '|')
                }
            elif show.sports:
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'episode': episode.airdate.strftime('%b')
                }
            elif show.anime:
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'episode': "{0:d}".format(int(episode.scene_absolute_number))
                }
            else:
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'season': episode.scene_season,
                    'episode': episode.scene_episode
                }
        if season:
            if show.air_by_date or show.sports:
                # Match on the 'YYYY-MM' prefix of the airdate.
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'season': str(season.airdate)[:7],
                }
            elif show.anime:
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'season': "{0:d}".format(season.scene_absolute_number),
                }
            else:
                post_data['tvdb'] = {
                    'id': show.indexerid,
                    'season': season.scene_season,
                }
        if search_term:
            post_data['search'] = search_term
        return json.dumps(post_data)
class HDBitsCache(tvcache.TVCache):
    """RSS-style cache for HDBits, fed from the JSON API instead of a feed."""

    def _get_rss_data(self):
        """Fetch recent torrents from the API; return them in feedparser shape."""
        self.search_params = None  # HDBits cache does not use search_params so set it to None
        results = []
        try:
            parsed_json = self.provider.get_url(self.provider.urls['rss'], post_data=self.provider._make_post_data_JSON(), returns='json')
            if self.provider._check_auth_from_data(parsed_json):
                results = parsed_json['data']
        except Exception:
            # Best-effort: any failure leaves the cache update empty.
            pass
        return {'entries': results}
provider = HDBitsProvider() | unknown | codeparrot/codeparrot-clean | ||
from django.test import TestCase
from django.test.utils import override_settings
from template_preprocess.processor import process_template_content
from template_preprocess.test import get_test_template_settings
template_settings = get_test_template_settings()
@override_settings(**template_settings)
class TestExtendBlock(TestCase):
    """Tests for flattening of {% extends %}/{% block %} via the preprocessor.

    Modernized to use ``assertEqual``: ``assertEquals`` is a deprecated
    alias that emits DeprecationWarning on current Python/Django.
    """

    def test_basic_block(self):
        """Child blocks replace the parent's blocks; untouched blocks remain."""
        content = '{% include "extends/sub_template1.html" %}'
        result = process_template_content(content)
        correct = ('Before {% block inserted_content %}The Block'
                   '{%endblock inserted_content%} {% block block2 %}'
                   'Block 2{%endblock block2 %} {% block notreplaced %}'
                   'In wrapper{%endblock%} After ')
        self.assertEqual(result, correct)

    def test_extends_missing_template(self):
        """A missing parent template leaves the content unchanged."""
        content = '{% include "extends/parent_is_missing.html" %}'
        result = process_template_content(content)
        self.assertEqual(result, content)

    def test_recursive_extends(self):
        """A self-extending template must not loop; content is unchanged."""
        content = '{% include "extends/recursive.html" %}'
        result = process_template_content(content)
        self.assertEqual(result, content)

    def test_nested_blocks(self):
        """Nested {% block %} tags survive the flattening."""
        content = '{% include "extends/nested.html" %}'
        result = process_template_content(content)
        self.assertEqual(
            result,
            '{% block a %}{% block b %}{% endblock b %}{% endblock %} ')

    def test_load_tag_outside_of_block(self):
        """{% load %} tags outside any block are carried into the output."""
        content = '{% include "extends/load_tag_out_of_block.html" %}'
        result = process_template_content(content)
        correct = ('{% load another more from app.templatetags %}'
                   '{% load i18n %}Before {% block content %}'
                   'The content{% endblock %} After ')
        self.assertEqual(result, correct)

    def test_multiline_block(self):
        """A block whose body spans several lines is joined into one."""
        content = '{% include "extends/multiline.html" %}'
        result = process_template_content(content)
        correct = 'Before {%block ok%}Line 1 Line 2{%endblock%} '
        self.assertEqual(result, correct)
# Buildkite pull-request pipeline step: runs Gradle checkPart5 with
# FIPS 140-3 mode enabled.
config:
  # Only run when the pull request carries the "test-fips" label.
  allow-labels:
    - test-fips
steps:
  - label: part-5-fips-140-3
    command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true -Dtests.fips.mode=140-3 checkPart5
    timeout_in_minutes: 300
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2404
      machineType: custom-32-98304
      # Build in tmpfs for speed.
      buildDirectory: /dev/shm/bk
#!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Compare results of two render_pictures runs.
TODO(epoger): Start using this module to compare ALL images (whether they
were generated from GMs or SKPs), and rename it accordingly.
"""
# System-level imports
import logging
import os
import shutil
import subprocess
import tempfile
import time
# Must fix up PYTHONPATH before importing from within Skia
import rs_fixpypath # pylint: disable=W0611
# Imports from within Skia
from py.utils import git_utils
from py.utils import gs_utils
from py.utils import url_utils
import buildbot_globals
import column
import gm_json
import imagediffdb
import imagepair
import imagepairset
import results
# URL under which all render_pictures images can be found in Google Storage.
#
# TODO(epoger): In order to allow live-view of GMs and other images, read this
# from the input summary files, or allow the caller to set it within the
# GET_live_results call.
DEFAULT_IMAGE_BASE_GS_URL = 'gs://' + buildbot_globals.Get('skp_images_bucket')

# Column descriptors, and display preferences for them.
COLUMN__RESULT_TYPE = results.KEY__EXTRACOLUMNS__RESULT_TYPE
COLUMN__SOURCE_SKP = 'sourceSkpFile'
COLUMN__TILED_OR_WHOLE = 'tiledOrWhole'
COLUMN__TILENUM = 'tilenum'
COLUMN__BUILDER_A = 'builderA'
COLUMN__RENDER_MODE_A = 'renderModeA'
COLUMN__BUILDER_B = 'builderB'
COLUMN__RENDER_MODE_B = 'renderModeB'

# Known values for some of those columns.
COLUMN__TILED_OR_WHOLE__TILED = 'tiled'
COLUMN__TILED_OR_WHOLE__WHOLE = 'whole'

# Columns filtered with a freeform text box rather than a fixed set of choices.
FREEFORM_COLUMN_IDS = [
    COLUMN__SOURCE_SKP,
    COLUMN__TILENUM,
]

# Display order of columns in the comparison UI.
ORDERED_COLUMN_IDS = [
    COLUMN__RESULT_TYPE,
    COLUMN__SOURCE_SKP,
    COLUMN__TILED_OR_WHOLE,
    COLUMN__TILENUM,
    COLUMN__BUILDER_A,
    COLUMN__RENDER_MODE_A,
    COLUMN__BUILDER_B,
    COLUMN__RENDER_MODE_B,
]

# A special "repo:" URL type that we use to refer to Skia repo contents.
# (Useful for comparing against expectations files we store in our repo.)
REPO_URL_PREFIX = 'repo:'
REPO_BASEPATH = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))

# Which sections within a JSON summary file can contain results.
ALLOWED_SECTION_NAMES = [
    gm_json.JSONKEY_ACTUALRESULTS,
    gm_json.JSONKEY_EXPECTEDRESULTS,
]
class RenderedPicturesComparisons(results.BaseComparisons):
    """Loads results from multiple render_pictures runs into an ImagePairSet.
    """

    def __init__(self,
                 setA_dir, setB_dir,
                 setA_section, setB_section,
                 image_diff_db,
                 image_base_gs_url=DEFAULT_IMAGE_BASE_GS_URL, diff_base_url=None,
                 setA_label=None, setB_label=None,
                 gs=None, truncate_results=False, prefetch_only=False,
                 download_all_images=False):
        """Constructor: downloads images and generates diffs.

        Once the object has been created (which may take a while), you can call its
        get_packaged_results_of_type() method to quickly retrieve the results...
        unless you have set prefetch_only to True, in which case we will
        asynchronously warm up the ImageDiffDB cache but not fill in self._results.

        Args:
          setA_dir: root directory to copy all JSON summaries from, and to use as
              setA within the comparisons. This directory may be specified as a
              gs:// URL, special "repo:" URL, or local filepath.
          setB_dir: root directory to copy all JSON summaries from, and to use as
              setB within the comparisons. This directory may be specified as a
              gs:// URL, special "repo:" URL, or local filepath.
          setA_section: which section within setA to examine; must be one of
              ALLOWED_SECTION_NAMES
          setB_section: which section within setB to examine; must be one of
              ALLOWED_SECTION_NAMES
          image_diff_db: ImageDiffDB instance
          image_base_gs_url: "gs://" URL pointing at the Google Storage bucket/dir
              under which all render_pictures result images can
              be found; this will be used to read images for comparison within
              this code, and included in the ImagePairSet (as an HTTP URL) so its
              consumers know where to download the images from
          diff_base_url: base URL within which the client should look for diff
              images; if not specified, defaults to a "file:///" URL representation
              of image_diff_db's storage_root
          setA_label: description to use for results in setA; if None, will be
              set to a reasonable default
          setB_label: description to use for results in setB; if None, will be
              set to a reasonable default
          gs: instance of GSUtils object we can use to download summary files
          truncate_results: FOR MANUAL TESTING: if True, truncate the set of images
              we process, to speed up testing.
          prefetch_only: if True, return the new object as quickly as possible
              with empty self._results (just queue up all the files to process,
              don't wait around for them to be processed and recorded); otherwise,
              block until the results have been assembled and recorded in
              self._results.
          download_all_images: if True, download all images, even if we don't
              need them to generate diffs. This will take much longer to complete,
              but is useful for warming up the bitmap cache on local disk.
        """
        super(RenderedPicturesComparisons, self).__init__()
        self._image_diff_db = image_diff_db
        self._image_base_gs_url = image_base_gs_url
        self._diff_base_url = (
            diff_base_url or
            url_utils.create_filepath_url(image_diff_db.storage_root))
        self._gs = gs
        self.truncate_results = truncate_results
        self._prefetch_only = prefetch_only
        self._download_all_images = download_all_images
        # If we are comparing two different section types, we can use those
        # as the default labels for setA and setB.
        if setA_section != setB_section:
            self._setA_label = setA_label or setA_section
            self._setB_label = setB_label or setB_section
        else:
            self._setA_label = setA_label or 'setA'
            self._setB_label = setB_label or 'setB'
        # Stage both summary trees in a temp dir so local, gs:// and repo:
        # sources are all read the same way below.
        tempdir = tempfile.mkdtemp()
        try:
            setA_root = os.path.join(tempdir, 'setA')
            setB_root = os.path.join(tempdir, 'setB')
            # TODO(stephana): There is a potential race condition here... we copy
            # the contents out of the source_dir, and THEN we get the commithash
            # of source_dir. If source_dir points at a git checkout, and that
            # checkout is updated (by a different thread/process) during this
            # operation, then the contents and commithash will be out of sync.
            self._copy_dir_contents(source_dir=setA_dir, dest_dir=setA_root)
            setA_repo_revision = self._get_repo_revision(source_dir=setA_dir)
            self._copy_dir_contents(source_dir=setB_dir, dest_dir=setB_root)
            setB_repo_revision = self._get_repo_revision(source_dir=setB_dir)
            self._setA_descriptions = {
                results.KEY__SET_DESCRIPTIONS__DIR: setA_dir,
                results.KEY__SET_DESCRIPTIONS__REPO_REVISION: setA_repo_revision,
                results.KEY__SET_DESCRIPTIONS__SECTION: setA_section,
            }
            self._setB_descriptions = {
                results.KEY__SET_DESCRIPTIONS__DIR: setB_dir,
                results.KEY__SET_DESCRIPTIONS__REPO_REVISION: setB_repo_revision,
                results.KEY__SET_DESCRIPTIONS__SECTION: setB_section,
            }
            time_start = int(time.time())
            self._results = self._load_result_pairs(
                setA_root=setA_root, setB_root=setB_root,
                setA_section=setA_section, setB_section=setB_section)
            # _load_result_pairs returns None in prefetch_only mode.
            if self._results:
                self._timestamp = int(time.time())
                logging.info('Number of download file collisions: %s' %
                             imagediffdb.global_file_collisions)
                logging.info('Results complete; took %d seconds.' %
                             (self._timestamp - time_start))
        finally:
            # Always remove the staged copies, even on failure.
            shutil.rmtree(tempdir)
    def _load_result_pairs(self, setA_root, setB_root,
                           setA_section, setB_section):
        """Loads all JSON image summaries from 2 directory trees and compares them.

        TODO(stephana): This method is only called from within __init__(); it might
        make more sense to just roll the content of this method into __init__().

        Args:
          setA_root: root directory containing JSON summaries of rendering results
          setB_root: root directory containing JSON summaries of rendering results
          setA_section: which section (gm_json.JSONKEY_ACTUALRESULTS or
              gm_json.JSONKEY_EXPECTEDRESULTS) to load from the summaries in setA
          setB_section: which section (gm_json.JSONKEY_ACTUALRESULTS or
              gm_json.JSONKEY_EXPECTEDRESULTS) to load from the summaries in setB

        Returns the summary of all image diff results (or None, depending on
        self._prefetch_only).
        """
        logging.info('Reading JSON image summaries from dirs %s and %s...' % (
            setA_root, setB_root))
        setA_dicts = self.read_dicts_from_root(setA_root)
        setB_dicts = self.read_dicts_from_root(setB_root)
        logging.info('Comparing summary dicts...')
        # Two pair sets: one with everything, one holding only non-successes.
        all_image_pairs = imagepairset.ImagePairSet(
            descriptions=(self._setA_label, self._setB_label),
            diff_base_url=self._diff_base_url)
        failing_image_pairs = imagepairset.ImagePairSet(
            descriptions=(self._setA_label, self._setB_label),
            diff_base_url=self._diff_base_url)
        # Override settings for columns that should be filtered using freeform text.
        for column_id in FREEFORM_COLUMN_IDS:
            factory = column.ColumnHeaderFactory(
                header_text=column_id, use_freeform_filter=True)
            all_image_pairs.set_column_header_factory(
                column_id=column_id, column_header_factory=factory)
            failing_image_pairs.set_column_header_factory(
                column_id=column_id, column_header_factory=factory)
        all_image_pairs.ensure_extra_column_values_in_summary(
            column_id=COLUMN__RESULT_TYPE, values=[
                results.KEY__RESULT_TYPE__FAILED,
                results.KEY__RESULT_TYPE__NOCOMPARISON,
                results.KEY__RESULT_TYPE__SUCCEEDED,
            ])
        failing_image_pairs.ensure_extra_column_values_in_summary(
            column_id=COLUMN__RESULT_TYPE, values=[
                results.KEY__RESULT_TYPE__FAILED,
                results.KEY__RESULT_TYPE__NOCOMPARISON,
            ])
        logging.info('Starting to add imagepairs to queue.')
        self._image_diff_db.log_queue_size_if_changed(limit_verbosity=False)
        # Walk the union of summary paths so files present in only one set
        # still produce (no-comparison) entries.
        union_dict_paths = sorted(set(setA_dicts.keys() + setB_dicts.keys()))
        num_union_dict_paths = len(union_dict_paths)
        dict_num = 0
        for dict_path in union_dict_paths:
            dict_num += 1
            logging.info(
                'Asynchronously requesting pixel diffs for dict #%d of %d, "%s"...' %
                (dict_num, num_union_dict_paths, dict_path))
            dictA = self.get_default(setA_dicts, None, dict_path)
            self._validate_dict_version(dictA)
            dictA_results = self.get_default(dictA, {}, setA_section)
            dictB = self.get_default(setB_dicts, None, dict_path)
            self._validate_dict_version(dictB)
            dictB_results = self.get_default(dictB, {}, setB_section)
            image_A_base_url = self.get_default(
                setA_dicts, self._image_base_gs_url, dict_path,
                gm_json.JSONKEY_IMAGE_BASE_GS_URL)
            image_B_base_url = self.get_default(
                setB_dicts, self._image_base_gs_url, dict_path,
                gm_json.JSONKEY_IMAGE_BASE_GS_URL)
            # get the builders and render modes for each set
            builder_A = self.get_default(dictA, None,
                                         gm_json.JSONKEY_DESCRIPTIONS,
                                         gm_json.JSONKEY_DESCRIPTIONS_BUILDER)
            render_mode_A = self.get_default(dictA, None,
                                             gm_json.JSONKEY_DESCRIPTIONS,
                                             gm_json.JSONKEY_DESCRIPTIONS_RENDER_MODE)
            builder_B = self.get_default(dictB, None,
                                         gm_json.JSONKEY_DESCRIPTIONS,
                                         gm_json.JSONKEY_DESCRIPTIONS_BUILDER)
            render_mode_B = self.get_default(dictB, None,
                                             gm_json.JSONKEY_DESCRIPTIONS,
                                             gm_json.JSONKEY_DESCRIPTIONS_RENDER_MODE)
            skp_names = sorted(set(dictA_results.keys() + dictB_results.keys()))
            # Just for manual testing... truncate to an arbitrary subset.
            if self.truncate_results:
                skp_names = skp_names[1:3]
            for skp_name in skp_names:
                imagepairs_for_this_skp = []
                # Compare the whole-image rendering first...
                whole_image_A = self.get_default(
                    dictA_results, None,
                    skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
                whole_image_B = self.get_default(
                    dictB_results, None,
                    skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
                imagepairs_for_this_skp.append(self._create_image_pair(
                    image_dict_A=whole_image_A, image_dict_B=whole_image_B,
                    image_A_base_url=image_A_base_url,
                    image_B_base_url=image_B_base_url,
                    builder_A=builder_A, render_mode_A=render_mode_A,
                    builder_B=builder_B, render_mode_B=render_mode_B,
                    source_json_file=dict_path,
                    source_skp_name=skp_name, tilenum=None))
                # ...then each tile, padding the shorter tile list with None.
                tiled_images_A = self.get_default(
                    dictA_results, [],
                    skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
                tiled_images_B = self.get_default(
                    dictB_results, [],
                    skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
                if tiled_images_A or tiled_images_B:
                    num_tiles_A = len(tiled_images_A)
                    num_tiles_B = len(tiled_images_B)
                    num_tiles = max(num_tiles_A, num_tiles_B)
                    for tile_num in range(num_tiles):
                        imagepairs_for_this_skp.append(self._create_image_pair(
                            image_dict_A=(tiled_images_A[tile_num]
                                          if tile_num < num_tiles_A else None),
                            image_dict_B=(tiled_images_B[tile_num]
                                          if tile_num < num_tiles_B else None),
                            image_A_base_url=image_A_base_url,
                            image_B_base_url=image_B_base_url,
                            builder_A=builder_A, render_mode_A=render_mode_A,
                            builder_B=builder_B, render_mode_B=render_mode_B,
                            source_json_file=dict_path,
                            source_skp_name=skp_name, tilenum=tile_num))
                for one_imagepair in imagepairs_for_this_skp:
                    if one_imagepair:
                        all_image_pairs.add_image_pair(one_imagepair)
                        result_type = one_imagepair.extra_columns_dict[
                            COLUMN__RESULT_TYPE]
                        if result_type != results.KEY__RESULT_TYPE__SUCCEEDED:
                            failing_image_pairs.add_image_pair(one_imagepair)
        logging.info('Finished adding imagepairs to queue.')
        self._image_diff_db.log_queue_size_if_changed(limit_verbosity=False)
        if self._prefetch_only:
            return None
        else:
            return {
                results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict(
                    column_ids_in_order=ORDERED_COLUMN_IDS),
                results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict(
                    column_ids_in_order=ORDERED_COLUMN_IDS),
            }
def _validate_dict_version(self, result_dict):
"""Raises Exception if the dict is not the type/version we know how to read.
Args:
result_dict: dictionary holding output of render_pictures; if None,
this method will return without raising an Exception
"""
# TODO(stephana): These values should be defined as constants somewhere,
# to be kept in sync between this file and writable_expectations.py
expected_header_type = 'ChecksummedImages'
expected_header_revision = 1
if result_dict == None:
return
header = result_dict[gm_json.JSONKEY_HEADER]
header_type = header[gm_json.JSONKEY_HEADER_TYPE]
if header_type != expected_header_type:
raise Exception('expected header_type "%s", but got "%s"' % (
expected_header_type, header_type))
header_revision = header[gm_json.JSONKEY_HEADER_REVISION]
if header_revision != expected_header_revision:
raise Exception('expected header_revision %d, but got %d' % (
expected_header_revision, header_revision))
def _create_image_pair(self, image_dict_A, image_dict_B,
image_A_base_url, image_B_base_url,
builder_A, render_mode_A,
builder_B, render_mode_B,
source_json_file,
source_skp_name, tilenum):
"""Creates an ImagePair object for this pair of images.
Args:
image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image
image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image
image_A_base_url: base URL for image A
image_B_base_url: base URL for image B
builder_A: builder that created image set A or None if unknow
render_mode_A: render mode used to generate image set A or None if
unknown.
builder_B: builder that created image set A or None if unknow
render_mode_B: render mode used to generate image set A or None if
unknown.
source_json_file: string; relative path of the JSON file where this
result came from, within setA and setB.
source_skp_name: string; name of the source SKP file
tilenum: which tile, or None if a wholeimage
Returns:
An ImagePair object, or None if both image_dict_A and image_dict_B are
None.
"""
if (not image_dict_A) and (not image_dict_B):
return None
def _checksum_and_relative_url(dic):
if dic:
return ((dic[gm_json.JSONKEY_IMAGE_CHECKSUMALGORITHM],
int(dic[gm_json.JSONKEY_IMAGE_CHECKSUMVALUE])),
dic[gm_json.JSONKEY_IMAGE_FILEPATH])
else:
return None, None
imageA_checksum, imageA_relative_url = _checksum_and_relative_url(
image_dict_A)
imageB_checksum, imageB_relative_url = _checksum_and_relative_url(
image_dict_B)
if not imageA_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif not imageB_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif imageA_checksum == imageB_checksum:
result_type = results.KEY__RESULT_TYPE__SUCCEEDED
else:
result_type = results.KEY__RESULT_TYPE__FAILED
extra_columns_dict = {
COLUMN__RESULT_TYPE: result_type,
COLUMN__SOURCE_SKP: source_skp_name,
COLUMN__BUILDER_A: builder_A,
COLUMN__RENDER_MODE_A: render_mode_A,
COLUMN__BUILDER_B: builder_B,
COLUMN__RENDER_MODE_B: render_mode_B,
}
if tilenum == None:
extra_columns_dict[COLUMN__TILED_OR_WHOLE] = COLUMN__TILED_OR_WHOLE__WHOLE
extra_columns_dict[COLUMN__TILENUM] = 'N/A'
else:
extra_columns_dict[COLUMN__TILED_OR_WHOLE] = COLUMN__TILED_OR_WHOLE__TILED
extra_columns_dict[COLUMN__TILENUM] = str(tilenum)
try:
return imagepair.ImagePair(
image_diff_db=self._image_diff_db,
imageA_base_url=image_A_base_url,
imageB_base_url=image_B_base_url,
imageA_relative_url=imageA_relative_url,
imageB_relative_url=imageB_relative_url,
extra_columns=extra_columns_dict,
source_json_file=source_json_file,
download_all_images=self._download_all_images)
except (KeyError, TypeError):
logging.exception(
'got exception while creating ImagePair for'
' urlPair=("%s","%s"), source_skp_name="%s", tilenum="%s"' % (
imageA_relative_url, imageB_relative_url, source_skp_name,
tilenum))
return None
def _copy_dir_contents(self, source_dir, dest_dir):
    """Copy all contents of source_dir into dest_dir, recursing into subdirs.

    Args:
      source_dir: path to source dir (GS URL, local filepath, or a special
          "repo:" URL type that points at a file within our Skia checkout)
      dest_dir: path to destination dir (local filepath)

    The copy operates as a "merge with overwrite": any files in source_dir will
    be "overlaid" on top of the existing content in dest_dir. Existing files
    with the same names will be overwritten.
    """
    def _merge_copy(src, dst):
        # shutil.copytree() raises if dst already exists (before Python 3.8's
        # dirs_exist_ok), which breaks the documented merge-with-overwrite
        # contract; walk the tree and copy file-by-file instead.
        if not os.path.isdir(dst):
            os.makedirs(dst)
        for name in os.listdir(src):
            src_path = os.path.join(src, name)
            dst_path = os.path.join(dst, name)
            if os.path.isdir(src_path):
                _merge_copy(src_path, dst_path)
            else:
                shutil.copy2(src_path, dst_path)

    if gs_utils.GSUtils.is_gs_url(source_dir):
        (bucket, path) = gs_utils.GSUtils.split_gs_url(source_dir)
        self._gs.download_dir_contents(source_bucket=bucket, source_dir=path,
                                       dest_dir=dest_dir)
    elif source_dir.lower().startswith(REPO_URL_PREFIX):
        repo_dir = os.path.join(REPO_BASEPATH, source_dir[len(REPO_URL_PREFIX):])
        _merge_copy(repo_dir, dest_dir)
    else:
        _merge_copy(source_dir, dest_dir)
def _get_repo_revision(self, source_dir):
    """Get the commit hash of source_dir, IF it refers to a git checkout.

    Args:
      source_dir: path to source dir (GS URL, local filepath, or a special
          "repo:" URL type that points at a file within our Skia checkout;
          only the "repo:" URL type will have a commit hash.
    """
    # Only "repo:" URLs map to a git checkout; anything else has no revision.
    if not source_dir.lower().startswith(REPO_URL_PREFIX):
        return None
    repo_dir = os.path.join(REPO_BASEPATH, source_dir[len(REPO_URL_PREFIX):])
    return subprocess.check_output(
        args=[git_utils.GIT, 'rev-parse', 'HEAD'], cwd=repo_dir).strip()
import click
import click_shell
# Test shell decorator
def test_shell_decorator(cli_runner):
    """A bare @shell app prints its prompt once and exits cleanly on EOF."""
    @click_shell.shell(prompt='app# ')
    def app():
        pass

    result = cli_runner.invoke(app)
    expected = 'app# \n'
    assert result.output == expected
# Test with one command
def test_command_decorator(cli_runner):
    """A subcommand typed at the shell prompt runs and echoes its output."""
    @click_shell.shell(prompt='app$ ')
    def app_one_command():
        pass

    @app_one_command.command()
    def printer():
        click.echo('printed')

    result = cli_runner.invoke(app_one_command, input='printer\n')
    # The 'printer' command's output appears after the first prompt,
    # followed by a final prompt when the input stream ends.
    assert result.output == 'app$ printed\napp$ \n'
# Test with finisher
def test_on_finished(cli_runner):
    """The on_finished callback runs after the shell exits and sees the ctx."""
    def finisher(ctx):
        click.echo(ctx.obj['param'])

    @click_shell.shell(prompt='app> ', on_finished=finisher)
    def app_with_finisher():
        pass

    result = cli_runner.invoke(app_with_finisher, obj={'param': 'value'})
    # The finisher echoes ctx.obj['param'] after the prompt is closed.
    assert result.output == 'app> \nvalue\n'
#!/usr/bin/python3
'''!
@file test_epcalcwin.py
@package test_epcalcwin.py
@brief This is a little tool for tracking the EPs of Role Master Characters
@date (C) 2015-2021
@author Marcus Schwamberger
@email marcus@lederzeug.de
@license GNU V3.0
@version 1.2.0
----
@todo
- adding a RR window
- adding a cat/skill checker window for the whole group (select skill once and check for all)
- adding spellcasting windpw (stat man)
'''
import os
import json
from gui.window import *
from rpgtoolbox import epcalc, rpgtools as rpg
from rpgToolDefinitions.epcalcdefs import maneuvers
from pprint import pprint
from rpgToolDefinitions.helptools import RMDice as dice
from tkinter import filedialog
import re
import pickle
class EPCalcWin(blankWindow):
    """!
    This is a GUI for EP calculation for your character party.
    """

    def __init__(self, lang = "en", charlist = None, storepath = "./data"):
        """!
        Class constructor
        @param lang The chosen language for window's and button's
                    texts. At the moment, only English (en, default
                    value) and German (de) are supported.
        @param charlist list of dictionaries holding: player, charname, EPs
        @param storepath path for storing the data into the character files.
        """
        self.lang = lang
        # a mutable default argument ([]) would be shared between instances
        # and accumulate appended entries; use a None sentinel instead
        self.charlist = charlist if charlist is not None else []

        if self.charlist == []:
            # fall back to a single demo character so the window can open
            self.charlist.append({"player": "Marcus",
                                  "exp": 10000,
                                  "prof": "Ranger",
                                  "name": "Player1"
                                  })

        self.storepath = storepath
        blankWindow.__init__(self, self.lang)
        self.window.title("EP Calculator")
        self.__addMenu()
        self.__addHelpMenu()
        self.__buildWin()
        self.__loadAutosave()
        self.window.mainloop()

    def __addMenu(self):
        '''!
        This method adds the menu bar to the window
        '''
        self.filemenu = Menu(master = self.menu)
        self.menu.add_cascade(label = txtmenu['menu_file'][self.lang],
                              menu = self.filemenu)
        self.filemenu.add_command(label = submenu['file'][self.lang]['open'],
                                  command = self.__open)
        self.filemenu.add_command(label = submenu['file'][self.lang]['save'],
                                  command = self.__save)
        self.filemenu.add_separator()
        self.filemenu.add_command(label = submenu['file'][self.lang]['close'],
                                  command = self.__quit)

    def __addHelpMenu(self):
        """!
        This method defines a help menu.
        """
        self.helpmenu = Menu(master = self.menu)
        self.menu.add_cascade(label = txtmenu['help'][self.lang],
                              menu = self.helpmenu)
        self.helpmenu.add_separator()
        self.helpmenu.add_command(label = submenu['help'][self.lang]['about'],
                                  command = self._helpAbout)

    def __buildWin(self):
        """!
        This method builds the window content.
        """
        ## \var self.players
        # list of given player names
        self.players = []
        ## \var self.group
        # dictionary of EP objects per player
        self.group = {}

        for elem in self.charlist:
            self.players.append(elem["player"])
            self.group[elem["player"]] = epcalc.experience(elem["player"], elem["exp"])
            self.group[elem["player"]].updateInfo()

        # row 0: player selector plus read-only info labels
        self.__selecPlayer = StringVar(self.window)
        self.__selecPlayer.set(self.players[0])
        self.__playerOpt = OptionMenu(self.window,
                                      self.__selecPlayer,
                                      *self.players,
                                      command = self.__updSelec)
        self.__playerOpt.grid(column = 0, row = 0, sticky = "W")

        self.__charname = StringVar()
        self.__charname.set(self.charlist[0]["name"])
        Label(self.window,
              width = 20,
              textvariable = self.__charname,
              ).grid(row = 0, column = 1, sticky = "W")

        self.__charprof = StringVar()
        self.__charprof.set("{} ({})".format(self.charlist[0]["prof"], self.group[self.charlist[0]["player"]].lvl))
        Label(self.window,
              width = 15,
              textvariable = self.__charprof,
              ).grid(row = 0, column = 2, sticky = "W")

        self.__charexp = StringVar()
        self.__charexp.set(str(self.charlist[0]["exp"]))
        Label(self.window,
              width = 15,
              textvariable = self.__charexp,
              ).grid(row = 0, column = 3, sticky = "W")

        self.__gained = StringVar()
        self.__gained.set("+{}".format(self.group[self.charlist[0]["player"]].gainedep))
        Label(self.window,
              width = 10,
              textvariable = self.__gained,
              ).grid(row = 0, column = 4, sticky = "W")

        self.__newep = StringVar()
        self.__newep.set("<{}>".format(self.group[self.charlist[0]["player"]].gainedep + self.group[self.charlist[0]["player"]].ep))
        Label(self.window,
              width = 10,
              textvariable = self.__newep,
              ).grid(row = 0, column = 5, sticky = "W")

        self.__newlvl = IntVar()
        self.__newlvl.set(self.group[self.charlist[0]["player"]].lvl)
        Label(self.window,
              width = 10,
              textvariable = self.__newlvl,
              ).grid(row = 0, column = 6, sticky = "EW")

        # row 1: maneuvers
        Label(self.window,
              text = s_elem_def['MANEUVER'][self.lang] + ":",
              ).grid(row = 1, column = 0, sticky = "EW")

        self.manlist = list(maneuvers.keys())
        self.__selecman = StringVar()
        self.__selecman.set(self.manlist[0])
        self.__manOpt = OptionMenu(self.window,
                                   self.__selecman,
                                   *self.manlist
                                   )
        self.__manOpt.grid(row = 1, column = 1, sticky = "EW")

        Label(self.window,
              text = s_elem_def["COUNT"][self.lang] + ":"
              ).grid(row = 1, column = 2, sticky = "EW")

        self.__cMan = IntVar()
        self.__cMan.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__cMan,
              ).grid(row = 1, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcMan
               ).grid(row = 1, column = 4, sticky = "EW")

        Button(self.window,
               text = labels["win_man"][self.lang],
               command = self.__callManWin
               ).grid(row = 1, column = 6, sticky = "EW")

        # row 2: spells
        Label(self.window,
              text = s_elem_def["SPELL"][self.lang] + ":",
              ).grid(row = 2, column = 0, sticky = "W")

        self.__lvlSpell = IntVar()
        self.__lvlSpell.set(1)
        Entry(self.window,
              justify = "center",
              textvariable = self.__lvlSpell
              ).grid(row = 2, column = 1, sticky = "EW")

        Label(self.window,
              text = s_elem_def["COUNT"][self.lang] + ":"
              ).grid(row = 2, column = 2, sticky = "EW")

        self.__cSpell = IntVar()
        self.__cSpell.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__cSpell,
              ).grid(row = 2, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcSpell
               ).grid(row = 2, column = 4, sticky = "EW")

        Button(self.window,
               text = labels["win_casting"][self.lang],
               command = self.notdoneyet
               ).grid(row = 2, column = 6, sticky = "EW")

        # row 3: criticals taken (gained) and hit points taken
        self.critlist = ['T', 'A', 'B', 'C', 'D', 'E', "KILL"]
        Label(self.window,
              text = s_elem_def["H_CRITS"][self.lang] + ":",
              ).grid(row = 3, column = 0, sticky = "W")

        self.__gcrit = StringVar()
        self.__gcrit.set("T")
        OptionMenu(self.window,
                   self.__gcrit,
                   *self.critlist
                   ).grid(row = 3, column = 1, sticky = "EW")

        Label(self.window,
              text = s_elem_def["HITS"][self.lang] + ":",
              ).grid(row = 3, column = 2, sticky = "W")

        self.__hits = IntVar()
        self.__hits.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__hits,
              ).grid(row = 3, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcGCrit
               ).grid(row = 3, column = 4, sticky = "EW")

        Button(self.window,
               text = labels["win_fight"][self.lang],
               command = self.notdoneyet
               ).grid(row = 3, column = 6, rowspan = 2, sticky = "NEWS")

        # row 4: criticals dealt against enemies
        Label(self.window,
              text = s_elem_def["CRITICAL"][self.lang] + ":",
              ).grid(row = 4, column = 0, sticky = "W")

        self.__crit = StringVar()
        self.__crit.set("T")
        OptionMenu(self.window,
                   self.__crit,
                   *self.critlist
                   ).grid(row = 4, column = 1, sticky = "EW")

        Label(self.window,
              text = labels["lvl_enemy"][self.lang] + ":",
              ).grid(row = 4, column = 2, sticky = "WE")

        self.__lvlenem = IntVar()
        self.__lvlenem.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__lvlenem,
              ).grid(row = 4, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcCrit
               ).grid(row = 4, column = 4, sticky = "EW")

        # row 5: travel EPs
        Label(self.window,
              text = s_elem_def["TRAVEL"][self.lang] + ":",
              ).grid(row = 5, column = 0, sticky = "W")

        self.__travel = IntVar()
        self.__travel.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__travel,
              ).grid(row = 5, column = 1, sticky = "EW")

        Label(self.window,
              text = labels["comment"][self.lang] + ":",
              ).grid(row = 5, column = 2, sticky = "EW")

        self.__comtravel = StringVar()
        self.__comtravel.set("")
        Entry(self.window,
              justify = "center",
              textvariable = self.__comtravel,
              ).grid(row = 5, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcTravel
               ).grid(row = 5, column = 4, sticky = "EW")

        Button(self.window,
               text = labels["diary"][self.lang],
               command = self.notdoneyet
               ).grid(row = 5, column = 6, sticky = "EW")

        # row 6: ideas / role-playing EPs and the finalize button
        Label(self.window,
              text = s_elem_def["IDEAS"][self.lang] + ":",
              ).grid(row = 6, column = 0, sticky = "W")

        self.__ideas = IntVar()
        self.__ideas.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.__ideas,
              ).grid(row = 6, column = 1, sticky = "EW")

        Label(self.window,
              text = labels["comment"][self.lang] + ":",
              ).grid(row = 6, column = 2, sticky = "EW")

        self.__comideas = StringVar()
        self.__comideas.set("")
        Entry(self.window,
              justify = "center",
              textvariable = self.__comideas,
              ).grid(row = 6, column = 3, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_add"][self.lang],
               command = self.__calcIdeas
               ).grid(row = 6, column = 4, sticky = "EW")

        Button(self.window,
               text = txtbutton["but_fin"][self.lang],
               command = self.__finalize,
               bg = "grey",
               fg = "white"
               ).grid(row = 6, column = 6, sticky = "EW")

    def __updDisplay(self, curPlayer = ""):
        '''!
        Updates display of current player
        @param curPlayer name of selected player
        '''
        self.group[curPlayer].updateInfo()
        self.__gained.set("+{}".format(self.group[curPlayer].gainedep))
        self.__newep.set("<{}>".format(self.group[curPlayer].newep))
        self.__newlvl.set(self.group[curPlayer].newlvl)

    def __updSelec(self, event):
        """!
        Update selected player's data in the info labels.
        @param event Tk callback argument (unused)
        """
        selected = self.__selecPlayer.get()
        ind = self.players.index(selected)
        self.__charname.set(self.charlist[ind]["name"])
        self.__charprof.set("{} ({})".format(self.charlist[ind]["prof"], self.group[selected].lvl))
        self.__charexp.set(str(self.charlist[ind]["exp"]))
        self.__gained.set("+{}".format(self.group[self.charlist[ind]["player"]].gainedep))
        # NOTE(review): __updDisplay shows newep alone while this adds
        # gainedep + newep -- possible double counting; confirm against
        # epcalc.experience.updateInfo() semantics.
        self.__newep.set("<{}>".format(self.group[self.charlist[ind]["player"]].gainedep + self.group[self.charlist[ind]["player"]].newep))
        self.__newlvl.set(self.group[self.charlist[ind]["player"]].newlvl)

    def __autoSave(self):
        """!
        This function is for auto-saving the group object in case of a
        program / computer crash.
        """
        with open("autosave.pkl", "wb") as fp:
            pickle.dump(self.group, fp)

    def __loadAutosave(self):
        """!
        This loads an autosave file if there is any.

        @note pickle.load() executes arbitrary code from the file; only
              load autosave files this program wrote itself.
        """
        if os.path.exists("autosave.pkl"):
            with open("autosave.pkl", "rb") as fp:
                self.group = pickle.load(fp)
            os.remove("autosave.pkl")

    def __grpBonus(self):
        '''!
        This method calculates the group bonus during the finalize process:
        every character receives the average of all gained EPs on top.
        '''
        grpbonus = 0

        for name in self.group.keys():
            grpbonus += self.group[name].gainedep

        grpbonus = int(round(grpbonus / len(self.group.keys())))

        for name in self.group.keys():
            self.group[name].gainedep += grpbonus
            self.group[name].updateInfo()

        self.__autoSave()

    def __calcMan(self):
        '''!
        This computes EPs for each successful maneuver and adds them to the
        character's gained EPs.
        '''
        curPlayer = self.__selecPlayer.get()
        curManLvl = self.__selecman.get()
        number = self.__cMan.get()
        self.group[curPlayer].maneuver(curManLvl, number)
        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __calcSpell(self):
        '''!
        This computes EPs for a given number of spells of the same level.
        '''
        curPlayer = self.__selecPlayer.get()
        spellLvl = self.__lvlSpell.get()
        spellNo = self.__cSpell.get()
        self.group[curPlayer].spell(spellLvl, spellNo)
        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __calcGCrit(self):
        '''!
        This calculates EP for gained (received) criticals and hits.
        '''
        curPlayer = self.__selecPlayer.get()
        gCrit = self.__gcrit.get()
        hits = self.__hits.get()
        # hits always score EPs; only real criticals ('A'..'E') score extra
        # (both branches of the old if/else called gainedHits identically)
        self.group[curPlayer].gainedHits(hits)

        if gCrit not in ["T", "KILL"]:
            self.group[curPlayer].gainedCrit(gCrit, 1)

        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __calcCrit(self):
        '''!
        This calculates EP for caused criticals against an enemy of a
        certain level.

        @note a "T" crit intentionally scores nothing here -- TODO confirm
              this matches the rules in rpgtoolbox.epcalc.
        '''
        curPlayer = self.__selecPlayer.get()
        crit = self.__crit.get()
        lvlEnem = self.__lvlenem.get()

        if crit not in ["T", "KILL"]:
            self.group[curPlayer].hitCrit(crit, lvlEnem, 1)
        elif crit == "KILL":
            self.group[curPlayer].killedNPC(lvlEnem, 1)

        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __calcTravel(self):
        '''!
        Travelled EPs
        ----
        @todo The comments have to be added to the character's diary
        '''
        curPlayer = self.__selecPlayer.get()
        travel = self.__travel.get()
        comm = self.__comtravel.get()
        self.group[curPlayer].travelled(travel)
        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __calcIdeas(self):
        '''!
        EPs for ideas and role-playing
        ----
        @todo The comments have to be added to the character's diary
        '''
        curPlayer = self.__selecPlayer.get()
        ideas = self.__ideas.get()
        comm = self.__comideas.get()
        self.group[curPlayer].ideas(ideas)
        self.__autoSave()
        self.__updDisplay(curPlayer)

    def __finalize(self):
        '''!
        Do all finalizing steps:
        -# adding new EPs to characters
        -# open display window for whole group EPs
        -# store new levels
           - in character's files
           - in group file
        '''
        self.__grpBonus()

        for i in range(0, len(self.charlist)):
            name = self.charlist[i]['player']
            self.charlist[i]["exp"] = self.group[name].newep
            self.charlist[i]['old_exp'] = self.group[name].ep

        self.__autoSave()
        gw = showGrpEP(self.charlist, self.storepath, self.lang)

    def __callManWin(self):
        '''!
        Opens maneuver window for maneuver rolls of the selected player.
        '''
        who = self.__selecPlayer.get()

        for elem in self.charlist:
            if elem['player'] == who:
                manWin(elem, self.lang)

    def __save(self):
        '''!
        This opens a file dialog window for saving the group data.
        '''
        savedir = filedialog.asksaveasfilename(defaultextension = ".json", filetypes = [("Char Group Files", ".json")])
        # the dialog returns an empty string when the user cancels
        if not savedir:
            return
        with open(savedir, "w") as fp:
            json.dump(self.charlist, fp, indent = 4)

    def __open(self):
        '''!
        This opens a file dialog window for opening a group file.
        '''
        opendir = filedialog.askopenfilename(defaultextension = ".json", filetypes = [("Char Group Files", ".json")])
        # the dialog returns an empty string when the user cancels
        if not opendir:
            return
        with open(opendir, "r") as fp:
            self.charlist = json.load(fp)

        # set up new player group list
        self.players = []
        self.group = {}

        for elem in self.charlist:
            self.players.append(elem["player"])
            self.group[elem["player"]] = epcalc.experience(elem["player"], elem["exp"])
            self.group[elem["player"]].updateInfo()

        self.__selecPlayer.set(self.players[0])
        # destroy the stale OptionMenu before replacing it; the old widget
        # was previously leaked by gridding a new one on top of it
        self.__playerOpt.destroy()
        self.__playerOpt = OptionMenu(self.window,
                                      self.__selecPlayer,
                                      *self.players,
                                      command = self.__updSelec)
        self.__playerOpt.grid(column = 0, row = 0, sticky = "W")
        # refresh the info labels for the newly loaded first player
        self.__updSelec(None)

    def __quit(self):
        '''!
        This method closes the window
        '''
        self.window.destroy()
class showGrpEP(object):
    '''!
    Display and save window for group EPs
    '''

    def __init__(self, charlist = None, storepath = "./data", lang = 'en'):
        """!
        Constructor
        @param charlist list of character dictionaries to display and save
        @param storepath base directory for the character files
        @param lang contains the chosen display language.
        """
        self.lang = lang
        self.storepath = storepath
        # a mutable default argument ([]) would be shared between calls;
        # use a None sentinel instead
        self.charlist = charlist if charlist is not None else []
        self.window = Toplevel()
        self.title = wintitle["rm_groupEP"][self.lang]
        self.window.title(self.title)

        for i in range(0, len(self.charlist)):
            Label(self.window,
                  text = "{} ({}):".format(self.charlist[i]["player"],
                                           self.charlist[i]["name"])
                  ).grid(row = i, column = 0, sticky = "EW")
            Label(self.window,
                  text = "+{} -> {}".format(self.charlist[i]["exp"] - self.charlist[i]["old_exp"],
                                            self.charlist[i]['exp'])
                  ).grid(row = i, column = 1, sticky = "EW")

        # place the buttons below the last character row; len() avoids a
        # NameError on an empty charlist (the loop variable was used before)
        nextrow = len(self.charlist)
        Button(self.window,
               text = txtbutton["but_save_char"][self.lang],
               command = self.saveChars,
               bg = "grey",
               fg = "white"
               ).grid(row = nextrow, column = 0, sticky = "NEWS")
        Button(self.window,
               text = txtbutton["but_save_grp"][self.lang],
               command = self.saveGroup
               ).grid(row = nextrow, column = 1, sticky = "NEWS")

        self.window.mainloop()

    def saveChars(self):
        '''!
        This saves all single characters separately from the group file.
        '''
        if os.path.exists(self.storepath):
            for char in self.charlist:
                # os.path.join avoids the duplicated slashes the former
                # string concatenation produced (e.g. "./data//Marcus/")
                charpath = os.path.join(self.storepath, char["player"])
                if os.path.exists(charpath):
                    filename = os.path.join(charpath, char['name'] + ".json")
                    with open(filename, "w") as fp:
                        json.dump(char, fp, indent = 4)
                    print("data saved to {}".format(filename))
                else:
                    print("{} not found -> {}".format(charpath, os.getcwd()))

    def saveGroup(self):
        '''!
        Saves all data in a group file.
        '''
        savedir = filedialog.asksaveasfilename(defaultextension = ".json", filetypes = [("Char Group Files", ".json")])
        # the dialog returns an empty string when the user cancels
        if not savedir:
            return
        with open(savedir, "w") as fp:
            json.dump(self.charlist, fp, indent = 4)
        if os.path.exists("autosave.pkl"):
            os.remove("autosave.pkl")
class manWin(object):
    '''
    Maneuver window: roll stat maneuvers for a single character.
    '''

    def __init__(self, character = None, lang = "en"):
        '''!
        Constructor
        @param character whole character data (dictionary)
        @param lang interface language; default: English
        '''
        # a mutable default argument ({}) would be shared between calls;
        # use a None sentinel instead
        self.character = character if character is not None else {}
        self.lang = lang
        self.man_ep = 0
        ## @var self.total
        # total result of skill check
        self.total = 0
        self.category = "Armor - Heavy"
        self.skill = ""
        self.man = "routine"
        self.maneuver = maneuvers
        self.mantab = rpg.statManeuver()
        self.dice = dice
        self.window = Toplevel()
        self.title = "{}: {} - {}".format(wintitle['rm_maneuver'][self.lang], self.character['player'], self.character['name'])
        self.window.title(self.title)
        self._buildwin()
        self.window.mainloop()

    def _buildwin(self):
        '''
        This defines the different elements in the window layout.
        '''
        # row 0: player, character and race/profession/level header
        Label(self.window,
              text = self.character["player"]
              ).grid(row = 0, column = 0, sticky = "NEWS")
        Label(self.window,
              text = self.character["name"]
              ).grid(row = 0, column = 2, sticky = "NEWS")
        Label(self.window,
              text = "{}/{} ({})".format(self.character["race"],
                                         self.character["prof"],
                                         self.character["lvl"])
              ).grid(row = 0, column = 4, sticky = "NEWS")

        from PIL import Image, ImageTk
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; switch to
        # Image.LANCZOS when the project's Pillow version allows it.
        self.cpic = ImageTk.PhotoImage(Image.open(self.character["piclink"]).resize((210, 210), Image.ANTIALIAS))
        self.picLabel = Label(master = self.window,
                              image = self.cpic
                              )
        self.picLabel.grid(column = 5,
                           row = 0,
                           columnspan = 2,
                           rowspan = 8,
                           sticky = "NEWS",
                           padx = 5,
                           pady = 5)

        # row 1: category / skill / maneuver level list boxes
        vcscroll = Scrollbar(self.window, orient = VERTICAL)
        self.catlb = Listbox(self.window,
                             yscrollcommand = vcscroll.set,
                             width = 30,
                             height = 5)
        vcscroll.config(command = self.catlb.yview)
        vcscroll.grid(row = 1, column = 1, sticky = "WNS")
        self.catlb.grid(row = 1, column = 0, sticky = "NEW")

        for cat in self.character["cat"].keys():
            self.catlb.insert(END, cat)

        self.catlb.bind("<Button-1>", self._fillSkill)

        vsscroll = Scrollbar(self.window, orient = VERTICAL)
        self.skilllb = Listbox(self.window,
                               yscrollcommand = vsscroll.set,
                               width = 30,
                               height = 5)
        vsscroll.config(command = self.skilllb.yview)
        vsscroll.grid(row = 1, column = 3, sticky = "WNS")
        self.skilllb.grid(row = 1, column = 2, sticky = "NEW")

        for skill in self.character["cat"]["Armor - Heavy"]["Skill"].keys():
            # "+" entries and bookkeeping keys are not selectable skills
            if skill not in ["Progression", "Stats"] and skill[-1] != "+":
                self.skilllb.insert(END, skill)

        self.skilllb.bind("<Button-1>", self._getSkill)

        vmscroll = Scrollbar(self.window, orient = VERTICAL)
        self.manlb = Listbox(self.window,
                             yscrollcommand = vmscroll.set,
                             height = 5)
        vmscroll.config(command = self.manlb.yview)
        vmscroll.grid(row = 1, column = 5, sticky = "WNS")
        self.manlb.grid(row = 1, column = 4, sticky = "NEW")

        for skill in self.maneuver.keys():
            self.manlb.insert(END, skill)

        self.manlb.bind("<Button-1>", self._getMan)

        # row 2: labels echoing the current selections
        self.catlabel = Label(self.window,
                              text = self.category)
        self.catlabel.grid(row = 2, column = 0, sticky = "EWS")
        self.skilllabel = Label(self.window,
                                text = self.skill)
        self.skilllabel.grid(row = 2, column = 2, sticky = "EWS")
        self.manlabel = Label(self.window,
                              text = self.man)
        self.manlabel.grid(row = 2, column = 4, sticky = "EWS")

        # row 3: modifier input, dice roll and result buttons
        Label(self.window,
              text = "+ {}:".format(labels['modifier'][self.lang])
              ).grid(row = 3, column = 0, sticky = "NEWS")
        Button(self.window,
               text = txtbutton["but_roll"][self.lang],
               command = self._rollDice,
               bg = "grey",
               fg = "white",
               # image = "./data/default/pics/d10.png"
               ).grid(row = 3, column = 2, sticky = "NEWS")
        Button(self.window,
               text = txtbutton["but_result"][self.lang],
               command = self._chkResult
               ).grid(row = 3, column = 4, sticky = "NEWS")

        # row 4: modifier entry, dice result and total labels
        self.mod = IntVar()
        self.mod.set(0)
        Entry(self.window,
              justify = "center",
              textvariable = self.mod
              ).grid(row = 4, column = 0, sticky = "EW")

        self.diceroll = StringVar()
        self.diceroll.set("0")
        Entry(self.window,
              justify = "center",
              textvariable = self.diceroll,
              ).grid(row = 4, column = 2, sticky = "EW")

        self.totallabel = StringVar()
        self.totallabel.set("--")
        Label(self.window,
              textvariable = self.totallabel,
              justify = "center"
              ).grid(row = 4, column = 4, sticky = "NEWS")

        # row 5: headers for the result classification
        Label(self.window,
              text = labels['class'][self.lang] + ":"
              ).grid(row = 5, column = 0, sticky = "NEWS")
        Label(self.window,
              text = labels['perc'][self.lang] + ":"
              ).grid(row = 5, column = 2, sticky = "NEWS")
        Label(self.window,
              text = labels['time'][self.lang] + ":"
              ).grid(row = 5, column = 4, sticky = "NEWS")

        # row 6: classification, success chance and time factor
        self.classif = StringVar()
        self.classif.set("--")
        Label(self.window,
              textvariable = self.classif,
              ).grid(row = 6, column = 0, sticky = "NEWS")

        self.perc = StringVar()
        self.perc.set("--")
        Label(self.window,
              textvariable = self.perc,
              ).grid(row = 6, column = 2, sticky = "NEWS")

        self.timef = StringVar()
        self.timef.set("--")
        Label(self.window,
              textvariable = self.timef,
              ).grid(row = 6, column = 4, sticky = "NEWS")

        # row 7: follow-up modifier from the maneuver table
        Label(self.window,
              text = labels['modifier'][self.lang] + ":"
              ).grid(row = 7, column = 0, sticky = "NEWS")
        self.modif = StringVar()
        self.modif.set("--")
        Label(self.window,
              textvariable = self.modif
              ).grid(row = 7, column = 2, sticky = "NEWS")

        # row 8: free-text description of the result
        self.desc = StringVar()
        self.desc.set("")
        Label(self.window,
              wraplength = 700,
              textvariable = self.desc
              ).grid(row = 8,
                     rowspan = 2,
                     column = 0,
                     columnspan = 5,
                     sticky = "NEWS")

    def _fillSkill(self, event):
        '''
        Depending on the selected category fill the skill listbox.
        @param event Tk callback argument (unused)
        '''
        self.skilllb.delete(0, END)
        selcat = self.catlb.curselection()
        self.catlabel.config(text = "")

        if selcat != ():
            self.category = self.catlb.get(selcat[0])
            self.catlabel.config(text = self.category)
            self.skilllabel.config(text = "")

            for skill in self.character["cat"][self.category]["Skill"].keys():
                if skill not in ["Progression", "Stats"] and skill[-1] != "+":
                    self.skilllb.insert(END, skill)
                else:
                    # debug output; the format placeholder was missing before,
                    # so the skill name was silently dropped
                    print("--> {}".format(skill))

    def _getSkill(self, event):
        '''
        Getting selected skill.
        @param event Tk callback argument (unused)
        '''
        selskill = self.skilllb.curselection()

        if selskill != ():
            self.skill = self.skilllb.get(selskill[0])
            self.skilllabel.config(text = self.skill)

    def _getMan(self, event):
        '''
        Get maneuver level.
        @param event Tk callback argument (unused)
        '''
        selman = self.manlb.curselection()

        if selman != ():
            self.man = self.manlb.get(selman[0])
            self.manlabel.configure(text = self.man)

        print("{} - {} - {}".format(self.category, self.skill, self.man))

    def _rollDice(self):
        '''!
        This throws a d100. Result is ([dice result], [unmodified])
        ----
        @todo set dice(rules="RM")
        '''
        self.result = self.dice()

        if self.result[1] == []:
            self.diceroll.set(str(self.result[0][0]))
        else:
            # an "um" (unmodified) roll is shown with its marker text
            self.diceroll.set("um {}".format(self.result[1][0]))

    def _chkResult(self):
        '''
        Computes the total check result from the dice roll, skill/category
        bonus, manual modifier and maneuver difficulty, then looks it up in
        the static maneuver table.
        '''
        dummy = self.diceroll.get()
        um = re.compile(r"(um|Um|UM) ([0-9]{1,3})")
        sr = re.compile(r"[0-9]+")

        if um.match(dummy):
            self.total = int(um.match(dummy).group(2))
        elif sr.match(dummy):
            self.total = int(sr.match(dummy).group())
        else:
            self.total = 0

        # default to 0 so the total is well-defined even if neither a
        # category nor a skill is selected
        skilladd = 0

        if self.category:
            skilladd = self.character["cat"][self.category]["total bonus"]

        if self.skill:
            skilladd = self.character["cat"][self.category]["Skill"][self.skill]["total bonus"]

        mod = self.mod.get()
        man = maneuvers[self.man]["mod"]
        self.total += skilladd + mod + man
        self.probe = self.mantab.checkRoll(self.total)
        self._updTotal()

    def _updTotal(self):
        '''
        Pushes the computed total and the maneuver table lookup into the
        result display variables.
        '''
        self.totallabel.set(str(self.total))
        self.classif.set(self.probe["classification"])
        self.perc.set(self.probe['success'])
        self.timef.set(self.probe['time'])
        self.modif.set(self.probe['mod'])
        self.desc.set(self.probe['description'])
if __name__ == '__main__':
    # Smoke test: load a sample party and launch the calculator with it.
    # TODO(review): hard-coded developer path -- make configurable.
    with open("/home/mongol/git/rpg-tools/src/data/groups/charparty.json", "r") as fp:
        cl = json.load(fp)

    # the loaded party was previously never handed to the window (and the
    # unused, uncalled rpg.statManeuver / rpg.RRroll aliases are removed)
    win = EPCalcWin(charlist = cl)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""
from __future__ import with_statement

# Package metadata (also exposed via bottle.__version__ etc.)
__author__ = 'Marcel Hellkamp'
__version__ = '0.10.4'
__license__ = 'MIT'

# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
    from optparse import OptionParser
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    # monkey-patching must happen before the stdlib modules below are imported
    if _cmd_options.server and _cmd_options.server.startswith('gevent'):
        import gevent.monkey; gevent.monkey.patch_all()
import sys
import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import imp
import itertools
import mimetypes
import os
import re
import subprocess
import tempfile
import thread
import threading
import time
import warnings
from Cookie import SimpleCookie
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from urlparse import urljoin, SplitResult as UrlSplitResult
# Workaround for a bug in some versions of lib2to3 (fixed on CPython 2.7 and 3.2)
import urllib
urlencode = urllib.urlencode
urlquote = urllib.quote
urlunquote = urllib.unquote
try: from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try: from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try: import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# Python 2/3 compatibility shims: unified bytes/unicode helpers and JSON glue.
py3k = sys.version_info >= (3,0,0)
NCTextIOWrapper = None

if sys.version_info < (2,6,0):
    msg = "Python 2.5 support may be dropped in future versions of Bottle."
    warnings.warn(msg, DeprecationWarning)

if py3k: # pragma: no cover
    # json strings are always text; decode request bodies before parsing
    json_loads = lambda s: json_lds(touni(s))
    # See Request.POST
    from io import BytesIO
    def touni(x, enc='utf8', err='strict'):
        """ Convert anything to unicode """
        return str(x, enc, err) if isinstance(x, bytes) else str(x)
    if sys.version_info < (3,2,0):
        from io import TextIOWrapper
        class NCTextIOWrapper(TextIOWrapper):
            ''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
                the wrapped buffer. This subclass keeps it open. '''
            def close(self): pass
else:
    json_loads = json_lds
    from StringIO import StringIO as BytesIO
    # on py2, "bytes" is just an alias for the native str type
    bytes = str
    def touni(x, enc='utf8', err='strict'):
        """ Convert anything to unicode """
        return x if isinstance(x, unicode) else unicode(str(x), enc, err)

def tob(data, enc='utf8'):
    """ Convert anything to bytes """
    return data.encode(enc) if isinstance(data, unicode) else bytes(data)

# native string type: unicode on py3, bytes on py2
tonat = touni if py3k else tob
tonat.__doc__ = """ Convert anything to native strings """
def try_update_wrapper(wrapper, wrapped, *a, **ka):
    """ Copy function metadata from *wrapped* onto *wrapper*, ignoring
        failures.

        functools.update_wrapper raises AttributeError for some targets
        (e.g. instance methods); in that case the metadata is left as-is.
    """
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# Backward compatibility
def depr(message):
    """ Emit a DeprecationWarning attributed to the caller's caller. """
    warnings.warn(message, DeprecationWarning, stacklevel=3)
# Small helpers
def makelist(data):
    """ Coerce *data* into a list: containers are converted element-wise,
        a truthy scalar is wrapped, and any falsy value yields []. """
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    return [data] if data else []
class DictProperty(object):
    ''' Property that maps to a key in a local dict-like attribute. '''

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # used as a decorator: remember the getter and default the key
        # to the decorated function's name
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        # compute lazily on first access, then serve from the dict
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class CachedProperty(object):
    ''' Non-data descriptor that computes its value once per instance and
        then stores the result in the instance __dict__, where it shadows
        the descriptor on subsequent lookups. Deleting the instance
        attribute re-arms the descriptor. '''
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result

cached_property = CachedProperty
class lazy_attribute(object): # Does not need configuration -> lower-case name
    ''' Descriptor that computes a value from the *class* on first access
        and then replaces itself on the class, so the value is shared by
        all instances and computed at most once. '''
    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        result = self.getter(cls)
        # Overwrite the descriptor with the plain value on the class.
        setattr(cls, self.__name__, result)
        return result
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ Common ancestor for every exception raised by bottle. """
    pass
#TODO: These should subclass BaseRequest
class HTTPResponse(BottleException):
    """ Raised to break execution and immediately finish the response.

        :param output: the response body (string, iterable, ...)
        :param status: the HTTP status code (default: 200)
        :param header: optional headers; wrapped in a HeaderDict if given
    """
    def __init__(self, output='', status=200, header=None):
        # Fix: start the MRO walk at HTTPResponse itself. The original
        # passed BottleException here, accidentally skipping one level;
        # the behavior is identical (BottleException defines no __init__)
        # but this is correct and consistent with HTTPError below.
        super(HTTPResponse, self).__init__("HTTP Response %d" % status)
        self.status = int(status)
        self.output = output
        self.headers = HeaderDict(header) if header else None

    def apply(self, response):
        """ Copy this object's status and headers onto *response*. """
        if self.headers:
            for key, value in self.headers.iterallitems():
                response.headers[key] = value
        response.status = self.status
class HTTPError(HTTPResponse):
    """ Raised (or returned) to generate an error page for *code*. """
    def __init__(self, code=500, output='Unknown Error', exception=None,
                 traceback=None, header=None):
        # Status/output/header handling is delegated to HTTPResponse.
        super(HTTPError, self).__init__(output, code, header)
        self.traceback = traceback
        self.exception = exception

    def __repr__(self):
        # Render the standard HTML error page for this error instance.
        return template(ERROR_PAGE_TEMPLATE, e=self)
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """
class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """
# NOTE(review): RouterUnknownModeError appears unused in this file — verify
# against the rest of the code base before removing.
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """
class RouteBuildError(RouteError):
    """ The route could not be built """
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.
        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''
    #: Regexp used for wildcards that do not specify their own pattern.
    default_pattern = '[^/]+'
    #: Filter applied to wildcards that do not name a filter explicitly.
    default_filter = 're'
    #: Sorry for the mess. It works. Trust me.
    # Matches both the old ':name#regexp#' syntax (groups 2-4) and the new
    # '<name:filter:conf>' syntax (groups 5-7). The leading '(\\\\*)' group
    # captures preceding backslashes so parse_rule() can detect escaped
    # wildcards.
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
    def __init__(self, strict=False):
        # :param strict: if true, static routes are not cached separately
        #   and go through the dynamic matcher like any other rule.
        self.rules = {} # A {rule: Rule} mapping
        self.builder = {} # A rule/name->build_info mapping
        self.static = {} # Cache for static routes: {path: {method: target}}
        self.dynamic = [] # Cache for dynamic routes. See _compile()
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        self.filters = {'re': self.re_filter, 'int': self.int_filter,
                        'float': self.float_filter, 'path': self.path_filter}
    def re_filter(self, conf):
        # 're' filter: raw regexp, no value conversion in either direction.
        return conf or self.default_pattern, None, None
    def int_filter(self, conf):
        # 'int' filter: matches signed integers; converts to/from int.
        return r'-?\d+', int, lambda x: str(int(x))
    def float_filter(self, conf):
        # 'float' filter: matches signed decimals; converts to/from float.
        return r'-?[\d.]+', float, lambda x: str(float(x))
    def path_filter(self, conf):
        # 'path' filter: matches anything, including slashes (non-greedy).
        return r'.*?', None, None
    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func
    def parse_rule(self, rule):
        ''' Parses a rule into a (name, filter, conf) token stream. If mode is
            None, name contains a static rule part. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix: yield prefix, None, None
            # Groups 1-3 carry the old ':name#conf#' syntax, groups 4-6 the
            # new '<name:filter:conf>' syntax; g[2] distinguishes the two.
            name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
            if not filtr: filtr = self.default_filter
            yield name, filtr, conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            # Emit the trailing static part (may be an empty string).
            yield prefix+rule[offset:], None, None
    def add(self, rule, method, target, name=None):
        ''' Add a new route or replace the target for an existing route. '''
        if rule in self.rules:
            # Known rule: just register the additional method/target pair.
            self.rules[rule][method] = target
            if name: self.builder[name] = self.builder[rule]
            return
        target = self.rules[rule] = {method: target}
        # Build pattern and other structures for dynamic routes
        anons = 0 # Number of anonymous wildcards
        pattern = '' # Regular expression pattern
        filters = [] # Lists of wildcard input filters
        builder = [] # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self.parse_rule(rule):
            if mode:
                is_static = False
                mask, in_filter, out_filter = self.filters[mode](conf)
                if key:
                    pattern += '(?P<%s>%s)' % (key, mask)
                else:
                    # Anonymous wildcard: match without a named group.
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons; anons += 1
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder
        if is_static and not self.strict_order:
            # Fully static route: cache the built path for O(1) lookup.
            self.static[self.build(rule)] = target
            return
        def fpat_sub(m):
            return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        # flat_pattern is a copy of the pattern with every capturing group
        # turned into a non-capturing one, so many rules can be OR-ed into
        # a single combined regexp without exhausting the group limit.
        flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)
        try:
            re_match = re.compile('^(%s)$' % pattern).match
        except re.error, e:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
        def match(path):
            """ Return an url-argument dictionary. """
            url_args = re_match(path).groupdict()
            for name, wildcard_filter in filters:
                try:
                    url_args[name] = wildcard_filter(url_args[name])
                except ValueError:
                    raise HTTPError(400, 'Path has wrong format.')
            return url_args
        try:
            # Append this rule to the most recent combined regexp.
            # re.compile raises AssertionError once the 100-group limit is
            # exceeded, which starts a fresh combined regexp below.
            combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
            self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
            self.dynamic[-1][1].append((match, target))
        except (AssertionError, IndexError), e: # AssertionError: Too many groups
            self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
                                [(match, target)]))
        return match
    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            # Keyword arguments not consumed by wildcards become the query
            # string.
            return url if not query else url+'?'+urlencode(query)
        except KeyError, e:
            raise RouteBuildError('Missing URL argument: %r' % e.args[0])
    def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
        path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
        if path in self.static:
            targets = self.static[path]
        else:
            for combined, rules in self.dynamic:
                match = combined.match(path)
                if not match: continue
                # match.lastindex is the outermost group of the one rule
                # inside the combined regexp that actually matched.
                getargs, targets = rules[match.lastindex - 1]
                urlargs = getargs(path) if getargs else {}
                break
        if not targets:
            raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
        method = environ['REQUEST_METHOD'].upper()
        if method in targets:
            return targets[method], urlargs
        # HEAD falls back to GET; 'ANY' routes accept every method.
        if method == 'HEAD' and 'GET' in targets:
            return targets['GET'], urlargs
        if 'ANY' in targets:
            return targets['ANY'], urlargs
        allowed = [verb for verb in targets if verb != 'ANY']
        if 'GET' in allowed and 'HEAD' not in allowed:
            allowed.append('HEAD')
        raise HTTPError(405, "Method not allowed.",
                        header=[('Allow',",".join(allowed))])
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''
    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict(config)
    def __call__(self, *a, **ka):
        # Deprecated entry point: kept so old code that calls the route
        # object directly keeps working (with a warning).
        depr("Some APIs changed to return Route() instances instead of"\
             " callables. Make sure to use the Route.call method and not to"\
             " call Route instances directly.")
        return self.call(*a, **ka)
    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()
    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        self.__dict__.pop('call', None)
    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call
    @property
    def _context(self):
        # Plugin API v1 received a plain dict instead of the Route object.
        depr('Switch to Plugin API v2 and access the Route object directly.')
        return dict(rule=self.rule, method=self.method, callback=self.callback,
                    name=self.name, app=self.app, config=self.config,
                    apply=self.plugins, skip=self.skiplist)
    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        unique = set()
        # Reversed so route-specific plugins wrap (run before) app-wide
        # ones; `unique` deduplicates named plugins.
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            # Plugins can be skipped by name, by instance or by type.
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p
    def _make_callback(self):
        # Wrap the raw callback with every applicable plugin, innermost
        # first as yielded by all_plugins().
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    # Plugin API: v2 gets the Route object, v1 a context dict.
                    api = getattr(plugin, 'api', 1)
                    context = self if api > 1 else self._context
                    callback = plugin.apply(callback, context)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
        if not callback is self.callback:
            try_update_wrapper(callback, self.callback)
        return callback
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
    """ WSGI application: an ordered collection of routes, plugins, hooks
        and configuration. Instances are callable WSGI apps. """
    def __init__(self, catchall=True, autojson=True, config=None):
        """ Create a new bottle instance.
            You usually don't do that. Use `bottle.app.push()` instead.

            :param catchall: if true, unhandled exceptions are turned into
                HTTP 500 error pages instead of propagating to the server.
            :param autojson: if true, install :class:`JSONPlugin` so dicts
                returned by callbacks are serialized to JSON.
        """
        self.routes = [] # List of installed :class:`Route` instances.
        self.router = Router() # Maps requests to :class:`Route` instances.
        self.plugins = [] # List of installed plugins.
        self.error_handler = {}
        #: App-wide configuration dictionary.
        self.config = ConfigDict(config or {})
        #: If true, most exceptions are caught and returned as :exc:`HTTPError`
        self.catchall = catchall
        #: An instance of :class:`HooksPlugin`. Empty by default.
        self.hooks = HooksPlugin()
        self.install(self.hooks)
        if autojson:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())
    def mount(self, prefix, app, **options):
        ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::
                root_app.mount('/admin/', admin_app)
            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.
            All other parameters are passed to the underlying :meth:`route` call.
        '''
        if isinstance(app, basestring):
            # Old (pre-0.10) call order mount(app, prefix) is still accepted.
            prefix, app = app, prefix
            depr('Parameter order of Bottle.mount() changed.') # 0.10
        parts = filter(None, prefix.split('/'))
        if not parts: raise ValueError('Empty path prefix.')
        path_depth = len(parts)
        options.setdefault('skip', True)
        options.setdefault('method', 'ANY')
        @self.route('/%s/:#.*#' % '/'.join(parts), **options)
        def mountpoint():
            try:
                # Strip the prefix so the mounted app sees its own paths.
                request.path_shift(path_depth)
                rs = BaseResponse([], 200)
                def start_response(status, header):
                    rs.status = status
                    for name, value in header: rs.add_header(name, value)
                    return rs.body.append
                rs.body = itertools.chain(rs.body, app(request.environ, start_response))
                return HTTPResponse(rs.body, rs.status, rs.headers)
            finally:
                # Always restore the original path, even on errors.
                request.path_shift(-path_depth)
        if not prefix.endswith('/'):
            self.route('/' + '/'.join(parts), callback=mountpoint, **options)
    def install(self, plugin):
        ''' Add a plugin to the list of plugins and prepare it for being
            applied to all routes of this application. A plugin may be a simple
            decorator or an object that implements the :class:`Plugin` API.
        '''
        if hasattr(plugin, 'setup'): plugin.setup(self)
        if not callable(plugin) and not hasattr(plugin, 'apply'):
            raise TypeError("Plugins must be callable or implement .apply()")
        self.plugins.append(plugin)
        # Invalidate cached route callbacks so the new plugin is applied.
        self.reset()
        return plugin
    def uninstall(self, plugin):
        ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
            object to remove all plugins that match that type, a string to remove
            all plugins with a matching ``name`` attribute or ``True`` to remove all
            plugins. Return the list of removed plugins. '''
        removed, remove = [], plugin
        # Iterate in reverse so deletion by index stays valid.
        for i, plugin in list(enumerate(self.plugins))[::-1]:
            if remove is True or remove is plugin or remove is type(plugin) \
              or getattr(plugin, 'name', True) == remove:
                removed.append(plugin)
                del self.plugins[i]
                if hasattr(plugin, 'close'): plugin.close()
        if removed: self.reset()
        return removed
    def reset(self, route=None):
        ''' Reset all routes (force plugins to be re-applied) and clear all
            caches. If an ID or route object is given, only that specific route
            is affected. '''
        if route is None: routes = self.routes
        elif isinstance(route, Route): routes = [route]
        else: routes = [self.routes[route]]
        for route in routes: route.reset()
        if DEBUG:
            # Debug mode: re-apply plugins eagerly to surface errors now.
            for route in routes: route.prepare()
        self.hooks.trigger('app_reset')
    def close(self):
        ''' Close the application and all installed plugins. '''
        for plugin in self.plugins:
            if hasattr(plugin, 'close'): plugin.close()
        self.stopped = True
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        return self.router.match(environ)
    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.router.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)
    def route(self, path=None, method='GET', callback=None, name=None,
              apply=None, skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::
                @app.route('/hello/:name')
                def hello(name):
                    return 'Hello %s' % name
            The ``:name`` part is a wildcard. See :class:`Router` for syntax
            details.
            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.
            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        # Support the no-argument form: @app.route applied to a function.
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)
        def decorator(callback):
            # TODO: Documentation and tests
            if isinstance(callback, basestring): callback = load(callback)
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    route = Route(self, rule, verb, callback, name=name,
                                  plugins=plugins, skiplist=skiplist, **config)
                    self.routes.append(route)
                    self.router.add(rule, verb, route, name=name)
                    if DEBUG: route.prepare()
            return callback
        return decorator(callback) if callback else decorator
    def get(self, path=None, method='GET', **options):
        """ Equals :meth:`route`. """
        return self.route(path, method, **options)
    def post(self, path=None, method='POST', **options):
        """ Equals :meth:`route` with a ``POST`` method parameter. """
        return self.route(path, method, **options)
    def put(self, path=None, method='PUT', **options):
        """ Equals :meth:`route` with a ``PUT`` method parameter. """
        return self.route(path, method, **options)
    def delete(self, path=None, method='DELETE', **options):
        """ Equals :meth:`route` with a ``DELETE`` method parameter. """
        return self.route(path, method, **options)
    def error(self, code=500):
        """ Decorator: Register an output handler for a HTTP error code"""
        def wrapper(handler):
            self.error_handler[int(code)] = handler
            return handler
        return wrapper
    def hook(self, name):
        """ Return a decorator that attaches a callback to a hook. """
        def wrapper(func):
            self.hooks.add(name, func)
            return func
        return wrapper
    def handle(self, path, method='GET'):
        """ (deprecated) Execute the first matching route callback and return
            the result. :exc:`HTTPResponse` exceptions are caught and returned.
            If :attr:`Bottle.catchall` is true, other exceptions are caught as
            well and returned as :exc:`HTTPError` instances (500).
        """
        depr("This method will change semantics in 0.10. Try to avoid it.")
        if isinstance(path, dict):
            return self._handle(path)
        return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
    def _handle(self, environ):
        """ Route the request described by *environ* and run its callback.
            Returns the callback result, the raised :exc:`HTTPResponse`, or
            an :exc:`HTTPError` (500) when :attr:`catchall` is enabled. """
        try:
            route, args = self.router.match(environ)
            environ['route.handle'] = environ['bottle.route'] = route
            environ['route.url_args'] = args
            return route.call(**args)
        except HTTPResponse, r:
            return r
        except RouteReset: # Re-apply plugins and retry the same request.
            route.reset()
            return self._handle(environ)
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception, e:
            if not self.catchall: raise
            stacktrace = format_exc(10)
            environ['wsgi.errors'].write(stacktrace)
            return HTTPError(500, "Internal Server Error", e, stacktrace)
    def _cast(self, out, request, response, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.
        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes
        """
        # Empty output is done here
        if not out:
            response['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (bytes, unicode)):
            out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, bytes):
            response['Content-Length'] = len(out)
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        # TODO: Handle these explicitly in handle() or make them iterable.
        if isinstance(out, HTTPError):
            out.apply(response)
            out = self.error_handler.get(out.status, repr)(out)
            if isinstance(out, HTTPResponse):
                depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9
            return self._cast(out, request, response)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.output, request, response)
        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                # Prefer the server-provided (possibly sendfile-based) wrapper.
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)
        # Handle Iterables. We peek into them to detect their inner type.
        try:
            out = iter(out)
            first = out.next()
            # Skip leading empty chunks so the type checks below see data.
            while not first:
                first = out.next()
        except StopIteration:
            return self._cast('', request, response)
        except HTTPResponse, e:
            first = e
        except Exception, e:
            first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
            if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
            or not self.catchall:
                raise
        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first, request, response)
        if isinstance(first, bytes):
            return itertools.chain([first], out)
        if isinstance(first, unicode):
            return itertools.imap(lambda x: x.encode(response.charset),
                                  itertools.chain([first], out))
        return self._cast(HTTPError(500, 'Unsupported response type: %s'\
                          % type(first)), request, response)
    def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface. """
        try:
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind()
            out = self._cast(self._handle(environ), request, response)
            # rfc2616 section 4.3
            if response._status_code in (100, 101, 204, 304)\
            or request.method == 'HEAD':
                # These responses must not carry a message body.
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, list(response.iter_headers()))
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception, e:
            # Last-resort error page for failures outside _handle/_cast.
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc(10)))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            start_response('500 INTERNAL SERVER ERROR', headers)
            return [tob(err)]
    def __call__(self, environ, start_response):
        ''' Each instance of :class:'Bottle' is a WSGI application. '''
        return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(DictMixin):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only."""
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = environ
environ['bottle.request'] = self
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
return FormsDict((c.key, c.value) for c in cookies.itervalues())
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = FormsDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is retuned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        # Kept only for API familiarity; prefer :attr:`is_xhr`.
        return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
    ''' Descriptor that exposes a single entry of an object's ``headers``
        mapping as a typed attribute. '''
    def __init__(self, name, reader=None, writer=str, default=''):
        self.name = name
        self.reader = reader        # converts stored value on read (optional)
        self.writer = writer        # converts assigned value on write
        self.default = default      # returned when the header is unset/empty
        self.__doc__ = 'Current value of the %r header.' % name.title()
    def __get__(self, obj, cls):
        if obj is None:
            return self
        value = obj.headers.get(self.name)
        if value and self.reader:
            return self.reader(value)
        return value or self.default
    def __set__(self, obj, value):
        if self.writer:
            value = self.writer(value)
        obj.headers[self.name] = value
    def __delete__(self, obj):
        if self.name in obj.headers:
            del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.
        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
    """
    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'
    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}
    def __init__(self, body='', status=None, **headers):
        ''' Create a new response.

            :param body: response body (string or iterable/file-like object)
            :param status: status code (int) or status line (str);
                           defaults to :attr:`default_status`
            :param headers: additional headers passed as keyword arguments
        '''
        self._status_line = None
        self._status_code = None
        self.body = body
        self._cookies = None
        # Header values are stored as lists to allow repeated headers.
        self._headers = {'Content-Type': [self.default_content_type]}
        self.status = status or self.default_status
        if headers:
            for name, value in headers.items():
                self[name] = value
    def copy(self):
        ''' Returns a copy of self. '''
        # NOTE(review): builds a `Response` (module-level thread-local alias)
        # rather than self.__class__ -- presumably for 0.9 compatibility.
        # The body and cookies are NOT copied, only status and headers.
        copy = Response()
        copy.status = self.status
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        return copy
    def __iter__(self):
        # Iterating a response yields body parts, never headers.
        return iter(self.body)
    def close(self):
        # Close file-like bodies so the server can release resources.
        if hasattr(self.body, 'close'):
            self.body.close()
    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line
    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code
    def _set_status(self, status):
        # Accept either an int code or a full "code reason" status line.
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        # Unknown int codes still get a synthesized status line.
        self._status_line = status or ('%d Unknown' % code)
    def _get_status(self):
        depr('BaseReuqest.status will change to return a string in 0.11. Use'\
             ' status_line and status_code to make sure.') #0.10
        return self._status_code
    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a numeric code. ''')
    del _get_status, _set_status
    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        # NOTE(review): the __dict__ assignment looks like a cache, but
        # `property` is a data descriptor and always wins over the instance
        # dict, so a fresh view is built on every access -- confirm intent.
        self.__dict__['headers'] = hdict = HeaderDict()
        hdict.dict = self._headers
        return hdict
    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]
    def set_header(self, name, value, append=False):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. If `append` is true, keep existing
            values and add this one instead. '''
        if append:
            self.add_header(name, value)
        else:
            self._headers[_hkey(name)] = [str(value)]
    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))
    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        headers = self._headers.iteritems()
        bad_headers = self.bad_headers.get(self.status_code)
        if bad_headers:
            headers = [h for h in headers if h[0] not in bad_headers]
        for name, values in headers:
            for value in values:
                yield name, value
        # Cookies are emitted last, one Set-Cookie header per morsel.
        if self._cookies:
            for c in self._cookies.values():
                yield 'Set-Cookie', c.OutputString()
    def wsgiheader(self):
        depr('The wsgiheader method is deprecated. See headerlist.') #0.10
        return self.headerlist
    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        return list(self.iter_headers())
    # Typed shortcuts for two common headers.
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)
    @property
    def charset(self):
        """ Return the charset specified in the content-type header (default: utf8). """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'
    @property
    def COOKIES(self):
        """ A dict-like SimpleCookie instance. This should not be used directly.
            See :meth:`set_cookie`. """
        depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
        if not self._cookies:
            self._cookies = SimpleCookie()
        return self._cookies
    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).
            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.
            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            # Signed cookie: pickle, sign and base64-encode the pair.
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value
        for key, value in options.iteritems():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/timestamp values to an RFC 1123 string.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            # Morsel keys use dashes where the kwargs use underscores.
            self._cookies[name][key.replace('_', '-')] = value
    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        # A cookie is "deleted" by sending an already-expired replacement.
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)
    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
class LocalRequest(BaseRequest, threading.local):
    ''' A thread-local subclass of :class:`BaseRequest`. '''
    # The constructor is a no-op so a single module-level instance can exist
    # unbound; each worker thread calls :meth:`bind` (== BaseRequest.__init__)
    # with its own WSGI environ.
    def __init__(self): pass
    bind = BaseRequest.__init__
class LocalResponse(BaseResponse, threading.local):
    ''' A thread-local subclass of :class:`BaseResponse`. '''
    # Re-initialize per request via bind() (== BaseResponse.__init__).
    bind = BaseResponse.__init__
# Module-level aliases kept for backwards compatibility with the 0.9 API.
Response = LocalResponse # BC 0.9
Request = LocalRequest # BC 0.9
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
    ''' Plugin that serializes dict return values of route callbacks to a
        JSON response body with the matching content type. '''
    name = 'json'
    api = 2
    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps
    def apply(self, callback, context):
        dumps = self.json_dumps
        if not dumps:
            return callback
        def wrapper(*args, **kwargs):
            result = callback(*args, **kwargs)
            if not isinstance(result, dict):
                return result
            # Serialize first so a failure propagates before the
            # content type of the response is touched.
            serialized = dumps(result)
            response.content_type = 'application/json'
            return serialized
        return wrapper
class HooksPlugin(object):
    ''' Plugin running user-registered callbacks before and after each
        request handled by the wrapped callback. '''
    name = 'hooks'
    api = 2
    _names = 'before_request', 'after_request', 'app_reset'
    def __init__(self):
        self.hooks = dict((name, []) for name in self._names)
        self.app = None
    def _empty(self):
        # True when no request-level hooks are installed at all.
        has_any = self.hooks['before_request'] or self.hooks['after_request']
        return not has_any
    def setup(self, app):
        self.app = app
    def add(self, name, func):
        ''' Attach a callback to a hook. '''
        was_empty = self._empty()
        self.hooks.setdefault(name, []).append(func)
        # Installing the first request hook changes routing; reset the app.
        if self.app and was_empty and not self._empty():
            self.app.reset()
    def remove(self, name, func):
        ''' Remove a callback from a hook. '''
        was_empty = self._empty()
        if name in self.hooks and func in self.hooks[name]:
            self.hooks[name].remove(func)
        if self.app and not was_empty and self._empty():
            self.app.reset()
    def trigger(self, name, *a, **ka):
        ''' Trigger a hook and return a list of results. '''
        callbacks = self.hooks[name]
        if ka.pop('reversed', False):
            callbacks = callbacks[::-1]
        return [callback(*a, **ka) for callback in callbacks]
    def apply(self, callback, context):
        if self._empty():
            return callback
        def wrapper(*a, **ka):
            self.trigger('before_request')
            rv = callback(*a, **ka)
            # after_request hooks run in reverse registration order.
            self.trigger('after_request', reversed=True)
            return rv
        return wrapper
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2
    def apply(self, callback, route):
        conf = route.config.get('template')
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            template_name, options = conf
            return view(template_name, **options)(callback)
        if isinstance(conf, str):
            if 'template_opts' in route.config:
                depr('The `template_opts` parameter is deprecated.') #0.9
                return view(conf, **route.config['template_opts'])(callback)
            return view(conf)(callback)
        # No template configured: leave the callback untouched.
        return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302).

            :param name: dotted name of the virtual package
            :param impmask: ``%s``-style mask expanded with the submodule name
                            to obtain the real module to import
        '''
        self.name = name
        self.impmask = impmask
        # Register (or reuse) the virtual package module and install this
        # object as a PEP 302 meta-path import hook.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        sys.meta_path.append(self)
    def find_module(self, fullname, path=None):
        # Only claim direct submodules of the virtual package.
        if '.' not in fullname: return
        packname, modname = fullname.rsplit('.', 1)
        if packname != self.name: return
        return self
    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        packname, modname = fullname.rsplit('.', 1)
        # Import the real module and alias it under the virtual name.
        realname = self.impmask % modname
        __import__(realname)
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """
    def __init__(self, *a, **k):
        # Internal storage maps each key to a *list* of values; the last
        # element of the list is the "current" value.
        self.dict = dict((k, [v]) for k, v in dict(*a, **k).iteritems())
    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def iterkeys(self): return self.dict.iterkeys()
    def itervalues(self): return (v[-1] for v in self.dict.itervalues())
    def iteritems(self): return ((k, v[-1]) for (k, v) in self.dict.iteritems())
    def iterallitems(self):
        # Yield every stored (key, value) pair, including shadowed values.
        for key, values in self.dict.iteritems():
            for value in values:
                yield key, value
    # 2to3 is not able to fix these automatically.
    # On Python 3 the iter* methods double as keys()/values()/items();
    # on Python 2 list-returning wrappers are installed instead.
    keys = iterkeys if py3k else lambda self: list(self.iterkeys())
    values = itervalues if py3k else lambda self: list(self.itervalues())
    items = iteritems if py3k else lambda self: list(self.iteritems())
    allitems = iterallitems if py3k else lambda self: list(self.iterallitems())
    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        # Broad on purpose: covers missing keys, bad indices and any
        # exception raised by the user-supplied type callable.
        except Exception, e:
            pass
        return default
    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)
    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]
    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []
    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''
    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    def getunicode(self, name, default=None, encoding=None):
        ''' Return the value for `name` decoded to unicode, or `default` if
            the value is missing or cannot be decoded. '''
        value, enc = self.get(name, default), encoding or self.input_encoding
        try:
            if isinstance(value, bytes): # Python 2 WSGI
                return value.decode(enc)
            elif isinstance(value, unicode): # Python 3 WSGI
                # WSGI delivers latin1-decoded text; re-decode to `enc`.
                return value.encode('latin1').decode(enc)
            return value
        except UnicodeError, e:
            return default
    def __getattr__(self, name): return self.getunicode(name, default=u'')
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """
    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)
    # All key access is routed through _hkey() for case-insensitivity.
    def __contains__(self, key):
        return _hkey(key) in self.dict
    def __delitem__(self, key):
        del self.dict[_hkey(key)]
    def __getitem__(self, key):
        return self.dict[_hkey(key)][-1]
    def __setitem__(self, key, value):
        self.dict[_hkey(key)] = [str(value)]
    def append(self, key, value):
        values = self.dict.setdefault(_hkey(key), [])
        values.append(str(value))
    def replace(self, key, value):
        self.dict[_hkey(key)] = [str(value)]
    def getall(self, key):
        return self.dict.get(_hkey(key)) or []
    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)
    def filter(self, names):
        # Remove every listed header that is present, ignoring the rest.
        for name in map(_hkey, names):
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.
        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a 'HTTP_' prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    def __init__(self, environ):
        self.environ = environ
    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        candidate = key.replace('-', '_').upper()
        if candidate in self.cgikeys:
            return candidate
        return 'HTTP_' + candidate
    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)
    def __getitem__(self, key):
        return tonat(self.environ[self._ekey(key)], 'latin1')
    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)
    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)
    def __iter__(self):
        # Yield header names in canonical Title-Case form.
        for envkey in self.environ:
            if envkey[:5] == 'HTTP_':
                yield envkey[5:].replace('_', '-').title()
            elif envkey in self.cgikeys:
                yield envkey.replace('_', '-').title()
    def keys(self):
        return list(self)
    def __len__(self):
        return len(self.keys())
    def __contains__(self, key):
        return self._ekey(key) in self.environ
class ConfigDict(dict):
    ''' A dict-subclass with some extras: You can access keys like attributes.
        Uppercase attributes create new ConfigDicts and act as name-spaces.
        Other missing attributes return None. Calling a ConfigDict updates its
        values and returns itself.

        >>> cfg = ConfigDict()
        >>> cfg.Namespace.value = 5
        >>> cfg.OtherNamespace(a=1, b=2)
        >>> cfg
        {'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
    '''
    def __getattr__(self, key):
        # Uppercase attributes auto-create nested namespace dicts.
        if key not in self and key[0].isupper():
            self[key] = ConfigDict()
        return self.get(key)
    def __setattr__(self, key, value):
        if hasattr(dict, key):
            raise AttributeError('Read-only attribute.')
        current = self.get(key)
        if key in self and current and isinstance(current, ConfigDict):
            raise AttributeError('Non-empty namespace attribute.')
        self[key] = value
    def __delattr__(self, key):
        if key in self:
            del self[key]
    def __call__(self, *a, **ka):
        for key, value in dict(*a, **ka).iteritems():
            setattr(self, key, value)
        return self
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """
    def __call__(self):
        """ Return the current default application. """
        return self[-1]
    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack """
        # Anything that is not already a Bottle app is replaced by a new one.
        app = value if isinstance(value, Bottle) else Bottle()
        self.append(app)
        return app
class WSGIFileWrapper(object):
    ''' Iterate a file-like object in fixed-size chunks (WSGI file wrapper). '''
    def __init__(self, fp, buffer_size=1024*64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Re-export common file methods directly when the wrapped object
        # provides them.
        for attr in ('fileno', 'close', 'read', 'readlines'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))
    def __iter__(self):
        read = self.fp.read
        chunk_size = self.buffer_size
        chunk = read(chunk_size)
        while chunk:
            yield chunk
            chunk = read(chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Aborts execution and causes a HTTP error.

        :param code: HTTP status code of the error (default: 500)
        :param text: human-readable error text
        :raises HTTPError: always
    """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version.

        :param url: redirect target, resolved relative to the request URL
        :param code: explicit status code; defaults to 303 for HTTP/1.1
                     clients and 302 for older protocols
        :raises HTTPResponse: always
    """
    if code is None:
        # 303 See Other exists only since HTTP/1.1; older clients get 302.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    location = urljoin(request.url, url)
    raise HTTPResponse("", status=code, header=dict(Location=location))
def static_file(filename, root, mimetype='auto', download=False):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. Set Content-Type, Content-Encoding,
        Content-Length and Last-Modified header. Obey If-Modified-Since header
        and HEAD requests.

        :param filename: path of the file, relative to `root`
        :param root: directory the served file must reside in
        :param mimetype: explicit content type, or 'auto' to guess from name
        :param download: if truthy, serve as attachment; a string value
                         overrides the suggested download file name
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    header = dict()
    # Directory-traversal guard: the resolved path must stay inside root.
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")
    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if mimetype: header['Content-Type'] = mimetype
        if encoding: header['Content-Encoding'] = encoding
    elif mimetype:
        header['Content-Type'] = mimetype
    if download:
        download = os.path.basename(filename if download == True else download)
        header['Content-Disposition'] = 'attachment; filename="%s"' % download
    stats = os.stat(filename)
    header['Content-Length'] = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    header['Last-Modified'] = lm
    # Answer conditional requests with 304 when the file is unchanged.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
        if ims is not None and ims >= int(stats.st_mtime):
            header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
            return HTTPResponse(status=304, header=header)
    # HEAD requests get headers only; otherwise stream the open file.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')
    return HTTPResponse(body, header=header)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
    There is only one debug level supported at the moment."""
    global DEBUG
    DEBUG = True if mode else False
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() interprets the struct as local standard time; subtracting
        # the local offset and the header's own tz offset yields UTC epoch.
        return time.mktime(parts[:8] + (0,)) - (parts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        method, payload = header.split(None, 1)
        if method.lower() != 'basic':
            return None
        #TODO: Add 2to3 save base64[encode/decode] functions.
        decoded = touni(base64.b64decode(tob(payload)))
        user, pwd = decoded.split(':', 1)
        return user, pwd
    except (KeyError, ValueError):
        return None
def _lscmp(a, b):
''' Compares two strings in a cryptographically save way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    payload = base64.b64encode(pickle.dumps(data, -1))
    signature = base64.b64encode(hmac.new(tob(key), payload).digest())
    # Wire format: '!' + signature + '?' + payload
    return tob('!') + signature + tob('?') + payload
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    data = tob(data)
    if not cookie_is_encoded(data):
        return None
    sig, msg = data.split(tob('?'), 1)
    expected = base64.b64encode(hmac.new(tob(key), msg).digest())
    # Constant-time comparison guards against timing attacks on the MAC.
    if _lscmp(sig[1:], expected):
        return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    has_prefix = data.startswith(tob('!'))
    has_separator = tob('?') in data
    return bool(has_prefix and has_separator)
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # The replacement targets were corrupted to identity replaces (e.g.
    # '&' -> '&'), turning this function into a no-op and re-opening HTML
    # injection. Restore the proper character entities; ampersand must be
    # replaced first so the other entities are not double-escaped.
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    # The entity replacements were corrupted ('%#10;', raw CR/TAB targets).
    # Restore them: escape HTML specials first, then encode embedded
    # newline/CR/tab as numeric character references so the value survives
    # inside a quoted attribute.
    return '"%s"' % html_escape(string).replace('\n','&#10;')\
                    .replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the
        function takes optional keyword arguments. The output is best
        described by example::

            a()         -> '/a'
            b(x, y)     -> '/b/:x/:y'
            c(x, y=5)   -> '/c/:x' and '/c/:x/:y'
            d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec(), whose first four fields (args, varargs, varkw,
    # defaults) are index-compatible with the tuple accesses below.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(func)
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
        :return: The modified (script_name, path_info) pair.
    '''
    if shift == 0:
        return script_name, path_info
    segs_path = path_info.strip('/').split('/')
    segs_script = script_name.strip('/').split('/')
    # An empty path splits to [''] -- normalize that to no segments.
    if segs_path and segs_path[0] == '':
        segs_path = []
    if segs_script and segs_script[0] == '':
        segs_script = []
    if 0 < shift <= len(segs_path):
        moved = segs_path[:shift]
        segs_script = segs_script + moved
        segs_path = segs_path[shift:]
    elif 0 > shift >= -len(segs_script):
        moved = segs_script[shift:]
        segs_path = moved + segs_path
        segs_script = segs_script[:shift]
    else:
        side = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % side)
    new_script_name = '/' + '/'.join(segs_script)
    new_path_info = '/' + '/'.join(segs_path)
    # Preserve a trailing slash on a non-empty PATH_INFO.
    if path_info.endswith('/') and segs_path:
        new_path_info += '/'
    return new_script_name, new_path_info
def validate(**vkargs):
    """
    Validates and manipulates keyword arguments by user defined callables.
    Handles ValueError and missing arguments by raising HTTPError(403).
    """
    depr('Use route wildcard filters instead.')
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kargs):
            for key, converter in vkargs.iteritems():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    kargs[key] = converter(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(*args, **kargs)
        return wrapper
    return decorator
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            # check() is only consulted when credentials were supplied.
            authorized = user is not None and check(user, password)
            if authorized:
                return func(*a, **ka)
            response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
            return HTTPError(401, text)
        return wrapper
    return decorator
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # Resolve the current default application on every call.
        bound_method = getattr(app(), name)
        return bound_method(*a, **ka)
    return wrapper
# Install module-level shortcuts (route(), get(), post(), ...) that forward
# to the corresponding method of the current default application.
for name in '''route get post put delete error mount
               hook install uninstall'''.split():
    globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    ''' Base class for backend server adapters. Subclasses implement
        :meth:`run` to start the actual server. '''
    quiet = False
    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)
    def run(self, handler): # pragma: no cover
        pass
    def __repr__(self):
        args = ', '.join(['%s=%s' % (key, repr(val))
                          for key, val in self.options.items()])
        return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def app(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(app)
class FlupFCGIServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Bind to host:port unless the caller already chose an address.
        self.options.setdefault('bindAddress', (self.host, self.port))
        server = flup.server.fcgi.WSGIServer(handler, **self.options)
        server.run()
class WSGIRefServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Suppress the default per-request console logging.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        server = make_server(self.host, self.port, handler, **self.options)
        server.serve_forever()
class CherryPyServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        address = (self.host, self.port)
        server = wsgiserver.CherryPyWSGIServer(address, handler)
        try:
            server.start()
        finally:
            # Always release sockets/threads, even on interrupt.
            server.stop()
class PasteServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            # Wrap the app in Paste's request logger unless silenced.
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        port_string = str(self.port)
        httpserver.serve(handler, host=self.host, port=port_string,
                         **self.options)
class MeinheldServer(ServerAdapter):
    """ Adapter for the meinheld server (C-based, event driven). """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            print "WARNING: Auto-reloading does not work with Fapws3."
            print " (Fapws3 breaks python thread support)"
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws serves from a single process; advertise that in the environ.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # Bridge the WSGI handler into tornado's async HTTP server.
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Lets makes sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        # WSGI apps are synchronous, so twisted runs them on a thread pool.
        thread_pool = ThreadPool()
        thread_pool.start()
        # Make sure the pool is torn down when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # NOTE: diesel's WSGIApplication takes no host argument here;
        # it binds only by port.
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GeventServer(ServerAdapter):
    """ Untested. Options:
        * `monkey` (default: True) fixes the stdlib to use greenthreads.
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
    """
    def run(self, handler):
        from gevent import wsgi as wsgi_fast, pywsgi, monkey, local
        if self.options.get('monkey', True):
            # Only patch if the stdlib has not been patched already
            # (threading.local is replaced by gevent's local after patching).
            if not threading.local is local.local: monkey.patch_all()
        wsgi = wsgi_fast if self.options.get('fast') else pywsgi
        wsgi.WSGIServer((self.host, self.port), handler).serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application

        # Gunicorn reads its configuration from an Application subclass;
        # feed it our bind address plus any adapter options.
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)

        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config

            def load(self):
                return handler

        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Try a list of preferred backends in order and serve with the first
        one whose dependencies are actually installed. Untested. """
    adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer]

    def run(self, handler):
        for candidate in self.adapters:
            try:
                adapter = candidate(self.host, self.port, **self.options)
                return adapter.run(handler)
            except ImportError:
                # Backend not installed: fall through to the next candidate.
                continue
#: Maps the string names accepted by :func:`run`'s ``server`` argument to
#: their :class:`ServerAdapter` subclasses.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.

        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    # Split off an optional ':expression' suffix from the module path.
    if ':' in target:
        module, target = target.split(":", 1)
    else:
        module, target = target, None
    if module not in sys.modules:
        __import__(module)
    mod = sys.modules[module]
    if not target:
        return mod
    # A plain identifier is a simple attribute fetch; anything containing
    # punctuation (calls, dots, underscores, ...) goes through eval().
    if target.isalnum():
        return getattr(mod, target)
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    # Suppress any run() calls executed as a side effect of the import.
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        # If the target evaluated to an app (callable), return it; otherwise
        # return the temporary default app the module's routes registered on.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporary added default application
        NORUN = nr_old
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param plugins: Plugins to install into the application before serving.
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        # Reloader parent process: spawn a child that runs the real server
        # and restart it every time it exits with status 3 (reload request).
        try:
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    # Touch the lockfile so the child knows we are still alive.
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    # Any status other than 3 is a real shutdown: propagate it.
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    stderr = sys.stderr.write
    try:
        app = app or default_app()
        if isinstance(app, basestring):
            # Accept an import string such as 'package.module:app'.
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            app.install(plugin)
        # Resolve the server argument: name -> class -> instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            stderr("Bottle server starting up (using %s)...\n" % repr(server))
            stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Reloader child process: watch module files in the background and
            # exit with status 3 to ask the parent for a restart.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SyntaxError, ImportError):
        # In reloader mode a broken edit should trigger a restart, not a crash.
        if not reloader: raise
        if not getattr(server, 'quiet', False): print_exc()
        sys.exit(3)
    finally:
        if not getattr(server, 'quiet', False): stderr('Shutdown...\n')
class FileCheckerThread(threading.Thread):
    ''' Interrupt the main thread as soon as a changed module file is
        detected, the lockfile gets deleted or the lockfile gets too old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()
        # Snapshot the modification time of every loaded module's source file.
        for module in sys.modules.values():
            path = getattr(module, '__file__', '')
            # Watch the .py source, not the compiled .pyc/.pyo bytecode.
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            # The reloader parent touches the lockfile periodically; a missing
            # or stale lockfile means the parent died or requested a stop.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in files.iteritems():
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow only the KeyboardInterrupt raised by interrupt_main().
        # BUGFIX: on a clean exit exc_type is None and the original
        # issubclass(None, KeyboardInterrupt) raised TypeError; guard first.
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Raised on template lookup or compilation problems; carries an HTTP
        500 status so the framework renders it as an internal server error. """
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']  # Tried by search() in this order
    settings = {} #used in prepare()
    defaults = {} #used in render()

    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        # Accept file-like objects as source as well as plain strings.
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = map(os.path.abspath, lookup)
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)

    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if os.path.isfile(name): return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)

    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]

    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError

    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (*args)
        or directly, as keywords (**kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        # Render exceptions inside the template output while debugging.
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)

    def render(self, *args, **kwargs):
        # Positional dicts and keyword arguments both become template vars.
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Cheetah resolves variables through a searchList; use a thread-local
        # dict so concurrent renders do not see each other's variables.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)

    def render(self, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        # Clear the shared dict so variables do not leak into the next render.
        self.context.vars.clear()
        return out
class Jinja2Template(BaseTemplate):
    """ Adapter for the Jinja2 template engine. """
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        # Route template loading through self.loader so BaseTemplate.search()
        # controls the lookup path.
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)

    def render(self, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)

    def loader(self, name):
        # Callback for jinja2's FunctionLoader; returning None means not found.
        fname = self.search(name, self.lookup)
        if fname:
            with open(fname, "rb") as f:
                return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
    ''' Untested! Adapter for the SimpleTAL template engine. '''
    def prepare(self, **options):
        from simpletal import simpleTAL
        # TODO: add option to load METAL files during render
        if self.source:
            self.tpl = simpleTAL.compileHTMLTemplate(self.source)
        else:
            with open(self.filename, 'rb') as fp:
                self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))

    def render(self, *args, **kwargs):
        from simpletal import simpleTALES
        for dictarg in args: kwargs.update(dictarg)
        # TODO: maybe reuse a context instead of always creating one
        context = simpleTALES.Context()
        # Defaults first, so explicit keyword arguments win on key clashes.
        for k,v in self.defaults.items():
            context.addGlobal(k, v)
        for k,v in kwargs.items():
            context.addGlobal(k, v)
        output = StringIO()
        self.tpl.expand(context, output)
        return output.getvalue()
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in template engine: compiles template text into python
        bytecode once, then executes it per render. """
    # Python keywords that open an indented block in generated code.
    blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
              'with', 'def', 'class')
    # Keywords that first close the current block, then open a new one.
    dedent_blocks = ('elif', 'else', 'except', 'finally')

    @lazy_attribute
    def re_pytokens(cls):
        ''' This matches comments and all kinds of quoted strings but does
            NOT match comments (#...) within quoted strings. (trust me) '''
        return re.compile(r'''
            (''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
            |'(?:[^\\']|\\.)+?' # Single quotes (')
            |"(?:[^\\"]|\\.)+?" # Double quotes (")
            |'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
            |"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
            |\#.* # Comments
            )''', re.VERBOSE)

    def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            # With noescape=True the meaning of {{x}} and {{!x}} is swapped.
            self._str, self._escape = self._escape, self._str

    @classmethod
    def split_comment(cls, code):
        """ Removes comments (#...) from python code. """
        if '#' not in code: return code
        #: Remove comments only (leave quoted strings as they are)
        subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
        return re.sub(cls.re_pytokens, subf, code)

    @cached_property
    def co(self):
        # Compiled code object for the generated python source (see .code).
        return compile(self.code, self.filename or '<string>', 'exec')

    @cached_property
    def code(self):
        # Translates the template text into python source code, line by line.
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        multiline = dedent = oneline = False
        template = self.source or open(self.filename, 'rb').read()

        def yield_tokens(line):
            # Split a text line into TXT/RAW/CMD tokens around {{...}} markers.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part

        def flush(): # Flush the ptrbuffer
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline += ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)

        def code(stmt):
            # Emit a statement at the current indentation depth.
            for line in stmt.splitlines():
                codebuffer.append(' ' * len(stack) + line.strip())

        for line in template.splitlines(True):
            lineno += 1
            line = line if isinstance(line, unicode)\
                        else unicode(line, encoding=self.encoding)
            if lineno <= 2:
                # Honor a PEP 263 style coding declaration in the first lines.
                m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if line.strip()[:2].count('%') == 1:
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = self.split_comment(line).strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() # You are actually reading this? Good luck, it's a mess :)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'

    def subtemplate(self, _name, _stdout, *args, **kwargs):
        # Render an included sub-template (compiled once, cached per name).
        for dictarg in args: kwargs.update(dictarg)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, kwargs)

    def execute(self, _stdout, *args, **kwargs):
        # Run the compiled template; output fragments accumulate in _stdout.
        for dictarg in args: kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
               '_include': self.subtemplate, '_str': self._str,
               '_escape': self._escape, 'get': env.get,
               'setdefault': env.setdefault, 'defined': env.__contains__})
        env.update(kwargs)
        eval(self.co, env)
        if '_rebase' in env:
            # A %rebase directive re-renders the base template around the
            # output produced so far (passed along as _base).
            subtpl, rargs = env['_rebase']
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return self.subtemplate(subtpl,_stdout,rargs)
        return env

    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        for dictarg in args: kwargs.update(dictarg)
        stdout = []
        self.execute(stdout, kwargs)
        return ''.join(stdout)
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    # Compile and cache the template; while DEBUG is on the cache is bypassed
    # so edits show up without a restart.
    if tpl not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            # Already-prepared template instance: cache it as-is.
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Heuristic: template-syntax characters mean an inline source string.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            # Otherwise treat the argument as a template name or filename.
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs)
# Convenience wrappers that pre-select a template engine for :func:`template`.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

          - return a dict of template vars to fill out the template
          - return something other than a dict and the view decorator will not
            process the template, but return the handler result as is.
            This includes returning a HTTPResponse(dict) to get,
            for instance, JSON with autojson or other castfilters.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # Anything that is not dict-like passes through untouched.
            if not isinstance(result, (dict, DictMixin)):
                return result
            # Handler-provided variables override the decorator defaults.
            tplvars = defaults.copy()
            tplvars.update(result)
            return template(tpl_name, **tplvars)
        return wrapper
    return decorator
# Engine-specific variants of :func:`view`. The template_adapter keyword ends
# up in the view defaults and is later popped off again by :func:`template`.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
#: Default template search path used by :func:`template` and friends.
TEMPLATE_PATH = ['./', './views/']
#: Cache for prepared templates; re-populated on every call while DEBUG is on.
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()

#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
#: Pre-formatted "200 OK" style status lines, one per known status code.
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.iteritems())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%try:
%from bottle import DEBUG, HTTP_CODES, request, touni
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%end
"""
#: A thread-safe instance of :class:`Request` representing the `current` request.
request = Request()

#: A thread-safe instance of :class:`Response` used to build the HTTP response.
response = Response()

#: A thread-safe namespace. Not used by Bottle.
local = threading.local()

# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()

#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module
if __name__ == '__main__':
    # Command line interface: ``python bottle.py [options] package.module:app``
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        print 'Bottle', __version__; sys.exit(0)
    if not args:
        parser.print_help()
        print '\nError: No application specified.\n'
        sys.exit(1)
    try:
        # Make the current directory importable and let the target app's
        # ``import bottle`` resolve to this very module instance.
        sys.path.insert(0, '.')
        sys.modules.setdefault('bottle', sys.modules['__main__'])
    except (AttributeError, ImportError), e:
        parser.error(e.args[0])
    if opt.bind and ':' in opt.bind:
        host, port = opt.bind.rsplit(':', 1)
    else:
        host, port = (opt.bind or 'localhost'), 8080
    debug(opt.debug)
    run(args[0], host=host, port=port, server=opt.server, reloader=opt.reload, plugins=opt.plugin)
# THE END | unknown | codeparrot/codeparrot-clean | ||
// Tests that type error points to the path in attribute
use serde_derive::Deserialize;
#[derive(Deserialize)]
#[serde(default = "main")] // container-level default: `main` returns (), not Struct
struct Struct {
    #[serde(default = "main")] // field-level default: `main` does not return u8
    f1: u8,
    f2: u8,
    #[serde(default = "main")] // field-level default: `main` does not return i8
    f3: i8,
}
fn main() {} // intentionally empty: this fixture only exercises compile errors
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import SceneXplainAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SceneXplainAPIWrapper": "langchain_community.utilities"}

# Importer that resolves names listed in DEPRECATED_LOOKUP from their new
# location, raising deprecation warnings on access.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (PEP 562 module-level __getattr__)."""
    return _import_attribute(name)


__all__ = [
    "SceneXplainAPIWrapper",
]
//
// MasterViewController.swift
//
// Copyright (c) 2014-2018 Alamofire Software Foundation (http://alamofire.org/)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
import Alamofire
import UIKit
/// Root table view of the Alamofire example app: each row kicks off one kind
/// of HTTP request against httpbin.org and pushes a detail view for it.
class MasterViewController: UITableViewController {
    // MARK: - Properties

    @IBOutlet var titleImageView: UIImageView!
    var detailViewController: DetailViewController?
    // Shared reachability manager; status changes are logged to the console.
    private var reachability: NetworkReachabilityManager!

    // MARK: - View Lifecycle

    override func awakeFromNib() {
        super.awakeFromNib()
        navigationItem.titleView = titleImageView
        clearsSelectionOnViewWillAppear = true
        reachability = NetworkReachabilityManager.default
        monitorReachability()
    }

    // MARK: - UIStoryboardSegue

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if
            let navigationController = segue.destination as? UINavigationController,
            let detailViewController = navigationController.topViewController as? DetailViewController {
            // Map the segue identifier to a concrete Alamofire request and
            // hand it to the detail view for execution/display.
            func requestForSegue(_ segue: UIStoryboardSegue) -> Request? {
                switch segue.identifier! {
                case "GET":
                    detailViewController.segueIdentifier = "GET"
                    return AF.request("https://httpbin.org/get")
                case "POST":
                    detailViewController.segueIdentifier = "POST"
                    return AF.request("https://httpbin.org/post", method: .post)
                case "PUT":
                    detailViewController.segueIdentifier = "PUT"
                    return AF.request("https://httpbin.org/put", method: .put)
                case "DELETE":
                    detailViewController.segueIdentifier = "DELETE"
                    return AF.request("https://httpbin.org/delete", method: .delete)
                case "DOWNLOAD":
                    detailViewController.segueIdentifier = "DOWNLOAD"
                    let destination = DownloadRequest.suggestedDownloadDestination(for: .cachesDirectory,
                                                                                   in: .userDomainMask)
                    return AF.download("https://httpbin.org/stream/1", to: destination)
                default:
                    return nil
                }
            }

            if let request = requestForSegue(segue) {
                detailViewController.request = request
            }
        }
    }

    // MARK: - UITableViewDelegate

    override func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) {
        // The last section's first row prints the current reachability status.
        if indexPath.section == 3 && indexPath.row == 0 {
            print("Reachability Status: \(reachability.status)")
            tableView.deselectRow(at: indexPath, animated: true)
        }
    }

    // MARK: - Private - Reachability

    private func monitorReachability() {
        reachability.startListening { status in
            print("Reachability Status Changed: \(status)")
        }
    }
}
## 1.15.0 (Unreleased)
NEW FEATURES:
* We now produce builds for Windows ARM64 ([#32719](https://github.com/hashicorp/terraform/issues/32719))
* You can set a `deprecated` attribute on variable and output blocks to indicate that they are deprecated. This will produce warnings when passing in a value for a deprecated variable or when referencing a deprecated output. ([#38001](https://github.com/hashicorp/terraform/issues/38001))
* backend/s3: Support authentication via `aws login` ([#37967](https://github.com/hashicorp/terraform/issues/37967))
ENHANCEMENTS:
* ssh-based provisioner (file + remote-exec): Re-enable support for PowerShell ([#37794](https://github.com/hashicorp/terraform/issues/37794))
* terraform init log timestamps include millisecond precision ([#37818](https://github.com/hashicorp/terraform/issues/37818))
* init: skip dependencies declared in development override. This allows you to use `terraform init` with developer overrides and install dependencies that are not declared in the override file. ([#37884](https://github.com/hashicorp/terraform/issues/37884))
* Terraform Test: Allow functions within mock blocks ([#34672](https://github.com/hashicorp/terraform/issues/34672))
* improve detection of deprecated resource attributes / blocks ([#38077](https://github.com/hashicorp/terraform/issues/38077))
BUG FIXES:
* testing: File-level error diagnostics are now included in JUnit XML skipped test elements, ensuring CI/CD pipelines can detect validation failures ([#37801](https://github.com/hashicorp/terraform/issues/37801))
* A refresh-only plan could result in a non-zero exit code with no changes ([#37406](https://github.com/hashicorp/terraform/issues/37406))
* cli: Fixed crash in `terraform show -json` when plan contains ephemeral resources with preconditions or postconditions ([#37834](https://github.com/hashicorp/terraform/issues/37834))
* cli: Fixed `terraform init -json` to properly format all backend configuration messages as JSON instead of plain text ([#37911](https://github.com/hashicorp/terraform/issues/37911))
* `state show`: The `state show` command will now explicitly fail and return code 1 when it fails to render the named resources state ([#37933](https://github.com/hashicorp/terraform/issues/37933))
* apply: Terraform will raise an explicit error if a plan file intended for one workspace is applied against another workspace ([#37954](https://github.com/hashicorp/terraform/issues/37954))
* lifecycle: `replace_triggered_by` now reports an error when given an invalid attribute reference that does not exist in the target resource ([#36740](https://github.com/hashicorp/terraform/issues/36740))
* backend: Fix nil pointer dereference crash during `terraform init` when the destination backend returns an error ([#38027](https://github.com/hashicorp/terraform/issues/38027))
* stacks: send progress events if the plan fails for better UI integration ([#38039](https://github.com/hashicorp/terraform/issues/38039))
* cloud: terraform cloud and registry discovery network requests are now more resilient, making temporary network or service related errors less common ([#38064](https://github.com/hashicorp/terraform/issues/38064))
* stacks: component instances should report no-op plan/apply. This solves a UI inconsistency with convergence destroy plans ([#38049](https://github.com/hashicorp/terraform/issues/38049))
UPGRADE NOTES:
* backend/s3: The `AWS_USE_FIPS_ENDPOINT` and `AWS_USE_DUALSTACK_ENDPOINT` environment variables now only respect `true` or `false` values, aligning with the AWS SDK for Go. This replaces the previous behavior which treated any non-empty value as `true`. ([#37601](https://github.com/hashicorp/terraform/issues/37601))
EXPERIMENTS:
Experiments are only enabled in alpha releases of Terraform CLI. The following features are not yet available in stable releases.
- The experimental "deferred actions" feature, enabled by passing the `-allow-deferral` option to `terraform plan`, permits `count` and `for_each` arguments in `module`, `resource`, and `data` blocks to have unknown values and allows providers to react more flexibly to unknown values.
- `terraform test cleanup`: The experimental `test cleanup` command. In experimental builds of Terraform, a manifest file and state files for each failed cleanup operation during test operations are saved within the `.terraform` local directory. The `test cleanup` command will attempt to clean up the local state files left behind automatically, without requiring manual intervention.
- `terraform test`: `backend` blocks and `skip_cleanup` attributes:
- Test authors can now specify `backend` blocks within `run` blocks in Terraform Test files. Run blocks with `backend` blocks will load state from the specified backend instead of starting from empty state on every execution. This allows test authors to keep long-running test infrastructure alive between test operations, saving time during regular test operations.
- Test authors can now specify `skip_cleanup` attributes within test files and within run blocks. The `skip_cleanup` attribute tells `terraform test` not to clean up state files produced by run blocks with this attribute set to true. The state files for affected run blocks will be written to disk within the `.terraform` directory, where they can then be cleaned up manually using the also experimental `terraform test cleanup` command.
## Previous Releases
For information on prior major and minor releases, refer to their changelogs:
- [v1.14](https://github.com/hashicorp/terraform/blob/v1.14/CHANGELOG.md)
- [v1.13](https://github.com/hashicorp/terraform/blob/v1.13/CHANGELOG.md)
- [v1.12](https://github.com/hashicorp/terraform/blob/v1.12/CHANGELOG.md)
- [v1.11](https://github.com/hashicorp/terraform/blob/v1.11/CHANGELOG.md)
- [v1.10](https://github.com/hashicorp/terraform/blob/v1.10/CHANGELOG.md)
- [v1.9](https://github.com/hashicorp/terraform/blob/v1.9/CHANGELOG.md)
- [v1.8](https://github.com/hashicorp/terraform/blob/v1.8/CHANGELOG.md)
- [v1.7](https://github.com/hashicorp/terraform/blob/v1.7/CHANGELOG.md)
- [v1.6](https://github.com/hashicorp/terraform/blob/v1.6/CHANGELOG.md)
- [v1.5](https://github.com/hashicorp/terraform/blob/v1.5/CHANGELOG.md)
- [v1.4](https://github.com/hashicorp/terraform/blob/v1.4/CHANGELOG.md)
- [v1.3](https://github.com/hashicorp/terraform/blob/v1.3/CHANGELOG.md)
- [v1.2](https://github.com/hashicorp/terraform/blob/v1.2/CHANGELOG.md)
- [v1.1](https://github.com/hashicorp/terraform/blob/v1.1/CHANGELOG.md)
- [v1.0](https://github.com/hashicorp/terraform/blob/v1.0/CHANGELOG.md)
- [v0.15](https://github.com/hashicorp/terraform/blob/v0.15/CHANGELOG.md)
- [v0.14](https://github.com/hashicorp/terraform/blob/v0.14/CHANGELOG.md)
- [v0.13](https://github.com/hashicorp/terraform/blob/v0.13/CHANGELOG.md)
- [v0.12](https://github.com/hashicorp/terraform/blob/v0.12/CHANGELOG.md)
- [v0.11 and earlier](https://github.com/hashicorp/terraform/blob/v0.11/CHANGELOG.md) | unknown | github | https://github.com/hashicorp/terraform | CHANGELOG.md |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestreportservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.2.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestReportService",
author_email="",
url="",
keywords=["Swagger", "AutoRestReportService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
) | unknown | codeparrot/codeparrot-clean | ||
//go:build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"reflect"
"strconv"
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
pkgfeatures "k8s.io/kubernetes/pkg/features"
)
// getResourceList returns a ResourceList with the
// specified cpu and memory resource values
func getResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
// getResourceRequirements returns a ResourceRequirements object
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
}
func TestResourceConfigForPod(t *testing.T) {
defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds
tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond) // in microseconds
tunedQuota := int64(1 * time.Millisecond / time.Microsecond)
minShares := uint64(MinShares)
burstableShares := MilliCPUToShares(100)
memoryQuantity := resource.MustParse("200Mi")
burstableMemory := memoryQuantity.Value()
burstablePartialShares := MilliCPUToShares(200)
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
guaranteedShares := MilliCPUToShares(100)
guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
memoryQuantity = resource.MustParse("100Mi")
cpuNoLimit := int64(-1)
guaranteedMemory := memoryQuantity.Value()
testCases := []struct {
description string
pod *v1.Pod
expected *ResourceConfig
enforceCPULimits bool
quotaPeriod uint64 // in microseconds
podLevelResourcesEnabled bool
}{
{
description: "besteffort",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &minShares},
},
{
description: "burstable-no-limits",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares},
},
{
description: "burstable-with-limits",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-with-limits-no-cpu-enforcement",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-partial-limits",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares},
},
{
description: "burstable-with-limits-with-tuned-quota",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-with-limits-no-cpu-enforcement-with-tuned-quota",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-partial-limits-with-tuned-quota",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares},
},
{
description: "guaranteed",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "guaranteed-no-cpu-enforcement",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "guaranteed-with-tuned-quota",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "guaranteed-no-cpu-enforcement-with-tuned-quota",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "burstable-partial-limits-with-init-containers",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100m"), getResourceList("100m", "100Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100m"), getResourceList("", "")),
},
},
InitContainers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100m"), getResourceList("100m", "100Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100m"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares},
},
{
description: "besteffort-with-pod-level-resources-enabled",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("", ""),
Limits: getResourceList("", ""),
},
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &minShares},
},
{
description: "burstable-with-pod-level-requests",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares},
},
{
description: "burstable-with-pod-and-container-level-requests",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("", "")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares},
},
{
description: "burstable-with-pod-level-resources",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("200m", "200Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-with-pod-and-container-level-resources",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("200m", "200Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
{
description: "burstable-with-partial-pod-level-resources-limits",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("200m", "300Mi"),
},
Containers: []v1.Container{
{
Name: "Container with guaranteed resources",
Resources: getResourceRequirements(getResourceList("200m", "200Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
{
description: "guaranteed-with-pod-level-resources",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "guaranteed-with-pod-and-container-level-resources",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
{
description: "guaranteed-pod-level-resources-with-init-containers",
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "50Mi")),
},
},
InitContainers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "50Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled)
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
t.Errorf("cpu period not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUPeriod, *actual.CPUPeriod)
}
if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
t.Errorf("cpu quota not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUQuota, *actual.CPUQuota)
}
if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
t.Errorf("cpu shares not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUShares, *actual.CPUShares)
}
if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
t.Errorf("memory not as expected. Expected: %v, Actual:%v", *testCase.expected.Memory, *actual.Memory)
}
})
}
}
func TestMilliCPUToQuota(t *testing.T) {
testCases := []struct {
input int64
quota int64
period uint64
}{
{
input: int64(0),
quota: int64(0),
period: uint64(0),
},
{
input: int64(5),
quota: int64(1000),
period: uint64(100000),
},
{
input: int64(9),
quota: int64(1000),
period: uint64(100000),
},
{
input: int64(10),
quota: int64(1000),
period: uint64(100000),
},
{
input: int64(200),
quota: int64(20000),
period: uint64(100000),
},
{
input: int64(500),
quota: int64(50000),
period: uint64(100000),
},
{
input: int64(1000),
quota: int64(100000),
period: uint64(100000),
},
{
input: int64(1500),
quota: int64(150000),
period: uint64(100000),
},
}
for _, testCase := range testCases {
quota := MilliCPUToQuota(testCase.input, int64(testCase.period))
if quota != testCase.quota {
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.quota, quota)
}
}
}
func TestHugePageLimits(t *testing.T) {
Mi := int64(1024 * 1024)
type inputStruct struct {
key string
input string
}
testCases := []struct {
name string
inputs []inputStruct
expected map[int64]int64
}{
{
name: "no valid hugepages",
inputs: []inputStruct{
{
key: "2Mi",
input: "128",
},
},
expected: map[int64]int64{},
},
{
name: "2Mi only",
inputs: []inputStruct{
{
key: v1.ResourceHugePagesPrefix + "2Mi",
input: "128",
},
},
expected: map[int64]int64{2 * Mi: 128},
},
{
name: "2Mi and 4Mi",
inputs: []inputStruct{
{
key: v1.ResourceHugePagesPrefix + "2Mi",
input: "128",
},
{
key: v1.ResourceHugePagesPrefix + strconv.FormatInt(2*Mi, 10),
input: "256",
},
{
key: v1.ResourceHugePagesPrefix + "4Mi",
input: "512",
},
{
key: "4Mi",
input: "1024",
},
},
expected: map[int64]int64{2 * Mi: 384, 4 * Mi: 512},
},
}
for _, testcase := range testCases {
t.Run(testcase.name, func(t *testing.T) {
resourceList := v1.ResourceList{}
for _, input := range testcase.inputs {
value, err := resource.ParseQuantity(input.input)
if err != nil {
t.Fatalf("error in parsing hugepages, value: %s", input.input)
} else {
resourceList[v1.ResourceName(input.key)] = value
}
}
resultValue := HugePageLimits(resourceList)
if !reflect.DeepEqual(testcase.expected, resultValue) {
t.Errorf("unexpected result for HugePageLimits(), expected: %v, actual: %v", testcase.expected, resultValue)
}
// ensure ResourceConfigForPod uses HugePageLimits correctly internally
p := v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: resourceList,
},
},
},
},
}
resultValuePod := ResourceConfigForPod(&p, false, 0, false)
if !reflect.DeepEqual(testcase.expected, resultValuePod.HugePageLimit) {
t.Errorf("unexpected result for ResourceConfigForPod(), expected: %v, actual: %v", testcase.expected, resultValuePod)
}
})
}
}
func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds
tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond) // in microseconds
minShares := uint64(MinShares)
burstableShares := MilliCPUToShares(100)
memoryQuantity := resource.MustParse("200Mi")
burstableMemory := memoryQuantity.Value()
burstablePartialShares := MilliCPUToShares(200)
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
guaranteedShares := MilliCPUToShares(100)
guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
memoryQuantity = resource.MustParse("100Mi")
cpuNoLimit := int64(-1)
guaranteedMemory := memoryQuantity.Value()
testCases := map[string]struct {
pod *v1.Pod
expected *ResourceConfig
enforceCPULimits bool
quotaPeriod uint64 // in microseconds
}{
"besteffort": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &minShares},
},
"burstable-no-limits": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, Unified: map[string]string{"memory.min": "104857600"}},
},
"burstable-with-limits": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"burstable-with-limits-no-cpu-enforcement": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"burstable-partial-limits": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
},
"burstable-with-limits-with-tuned-quota": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"burstable-partial-limits-with-tuned-quota": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
},
"guaranteed": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"guaranteed-no-cpu-enforcement": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"guaranteed-with-tuned-quota": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: true,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
"guaranteed-no-cpu-enforcement-with-tuned-quota": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},
},
},
},
enforceCPULimits: false,
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
},
}
for testName, testCase := range testCases {
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, true)
if !reflect.DeepEqual(actual.Unified, testCase.expected.Unified) {
t.Errorf("unexpected result, test: %v, unified not as expected", testName)
}
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/cm/helpers_linux_test.go |
/*
* Python Universal Functions Object -- Math for all types, plus fast
* arrays math
*
* Full description
*
* This supports mathematical (and Boolean) functions on arrays and other python
* objects. Math on large arrays of basic C types is rather efficient.
*
* Travis E. Oliphant 2005, 2006 oliphant@ee.byu.edu (oliphant.travis@ieee.org)
* Brigham Young University
*
* based on the
*
* Original Implementation:
* Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu
*
* with inspiration and code from
* Numarray
* Space Science Telescope Institute
* J. Todd Miller
* Perry Greenfield
* Rick White
*
*/
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#define _UMATHMODULE
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stddef.h>
#include "npy_config.h"
#include "npy_pycompat.h"
#include "npy_argparse.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "numpy/arrayscalars.h"
#include "lowlevel_strided_loops.h"
#include "ufunc_type_resolution.h"
#include "reduction.h"
#include "mem_overlap.h"
#include "npy_hashtable.h"
#include "conversion_utils.h"
#include "ufunc_object.h"
#include "override.h"
#include "npy_import.h"
#include "extobj.h"
#include "alloc.h"
#include "arrayobject.h"
#include "arraywrap.h"
#include "common.h"
#include "ctors.h"
#include "dtypemeta.h"
#include "numpyos.h"
#include "dispatching.h"
#include "convert_datatype.h"
#include "legacy_array_method.h"
#include "abstractdtypes.h"
#include "mapping.h"
#include "npy_static_data.h"
#include "multiarraymodule.h"
#include "number.h"
#include "scalartypes.h" // for is_anyscalar_exact and scalar_value
/********** PRINTF DEBUG TRACING **************/
#define NPY_UF_DBG_TRACING 0
#if NPY_UF_DBG_TRACING
#define NPY_UF_DBG_PRINT(s) {printf("%s", s);fflush(stdout);}
#define NPY_UF_DBG_PRINT1(s, p1) {printf((s), (p1));fflush(stdout);}
#define NPY_UF_DBG_PRINT2(s, p1, p2) {printf(s, p1, p2);fflush(stdout);}
#define NPY_UF_DBG_PRINT3(s, p1, p2, p3) {printf(s, p1, p2, p3);fflush(stdout);}
#else
#define NPY_UF_DBG_PRINT(s)
#define NPY_UF_DBG_PRINT1(s, p1)
#define NPY_UF_DBG_PRINT2(s, p1, p2)
#define NPY_UF_DBG_PRINT3(s, p1, p2, p3)
#endif
/**********************************************/
/*
 * Bundles the user-supplied positional inputs and (optional) outputs of a
 * single ufunc call, after the initial argument parsing step.
 */
typedef struct {
    PyObject *in;   /* The input arguments to the ufunc, a tuple */
    PyObject *out;  /* The output arguments, a tuple. If no non-None outputs are
                       provided, then this is NULL. */
} ufunc_full_args;
/* ---------------------------------------------------------------- */
static PyObject *
prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc);
static int
resolve_descriptors(int nop,
PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl,
PyArrayObject *operands[], PyArray_Descr *dtypes[],
PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[],
PyObject *inputs_tup, NPY_CASTING casting);
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_getfperr(void)
{
    /*
     * non-clearing get was only added in 1.9 so this function always cleared
     * keep it so just in case third party code relied on the clearing
     */
    /* `param` only provides a unique address for the memory barrier */
    char param = 0;
    /* Fixed mangled argument: `&param` had been corrupted to `¶m` */
    return npy_clear_floatstatus_barrier(&param);
}
/* Checking the status flag clears it */
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_clearfperr(void)  /* `(void)` instead of deprecated K&R-style `()` */
{
    /* `param` only provides a unique address for the memory barrier */
    char param = 0;
    /* Fixed mangled argument: `&param` had been corrupted to `¶m` */
    npy_clear_floatstatus_barrier(&param);
}
/* This many operands we optimize for on the stack. */
#define UFUNC_STACK_NARGS 5
#define NPY_UFUNC_DEFAULT_INPUT_FLAGS \
NPY_ITER_READONLY | \
NPY_ITER_ALIGNED | \
NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
#define NPY_UFUNC_DEFAULT_OUTPUT_FLAGS \
NPY_ITER_ALIGNED | \
NPY_ITER_ALLOCATE | \
NPY_ITER_NO_BROADCAST | \
NPY_ITER_NO_SUBTYPE | \
NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
/*
 * Runs once at module initialization: adjust the output-operand iterator
 * flags of the `matmul` gufunc found in the module dict `d`.
 * Returns 0 on success, -1 if `matmul` is missing or lookup failed.
 */
NPY_NO_EXPORT int
set_matmul_flags(PyObject *d)
{
    PyObject *matmul = NULL;
    if (PyDict_GetItemStringRef(d, "matmul", &matmul) <= 0) {
        // caller sets an error if one isn't already set
        return -1;
    }
    /*
     * The default output flag NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE allows
     * perfectly overlapping input and output (in-place operations). While
     * correct for the common mathematical operations, this assumption is
     * incorrect in the general case and specifically in the case of matmul.
     *
     * NPY_ITER_UPDATEIFCOPY is added by default in
     * PyUFunc_GeneralizedFunction, which is the variant called for gufuncs
     * with a signature
     *
     * Enabling NPY_ITER_WRITEONLY can prevent a copy in some cases.
     */
    npy_uint32 out_flags = NPY_ITER_WRITEONLY
                         | NPY_ITER_UPDATEIFCOPY
                         | NPY_UFUNC_DEFAULT_OUTPUT_FLAGS;
    out_flags &= ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
    /* Operand 2 is matmul's single output */
    ((PyUFuncObject *)matmul)->op_flags[2] = out_flags;
    Py_DECREF(matmul);
    return 0;
}
/*
* Set per-operand flags according to desired input or output flags.
* op_flags[i] for i in input (as determined by ufunc->nin) will be
* merged with op_in_flags, perhaps overriding per-operand flags set
* in previous stages.
* op_flags[i] for i in output will be set to op_out_flags only if previously
* unset.
* The input flag behavior preserves backward compatibility, while the
* output flag behaviour is the "correct" one for maximum flexibility.
*/
NPY_NO_EXPORT void
_ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags,
                   npy_uint32 op_out_flags, npy_uint32 *op_flags)
{
    /*
     * Fill `op_flags` for every operand: inputs merge the per-ufunc flags
     * with `op_in_flags` (backward compatible), outputs take the per-ufunc
     * flags when set and fall back to `op_out_flags` otherwise.
     */
    int num_in = ufunc->nin;
    int num_total = num_in + ufunc->nout;

    for (int idx = 0; idx < num_total; ++idx) {
        if (idx < num_in) {
            npy_uint32 merged = ufunc->op_flags[idx] | op_in_flags;
            /* A READWRITE/WRITEONLY operand must not keep default READONLY */
            if (merged & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
                merged &= ~NPY_ITER_READONLY;
            }
            op_flags[idx] = merged;
        }
        else {
            op_flags[idx] = (ufunc->op_flags[idx] != 0)
                                ? ufunc->op_flags[idx] : op_out_flags;
        }
    }
}
/* Skip spaces/tabs starting at `offset`; return the index of the first
 * character that is neither ' ' nor '\t'. */
static int
_next_non_white_space(const char* str, int offset)
{
    int pos = offset;
    for (; str[pos] == ' ' || str[pos] == '\t'; pos++) {
        /* advance past blanks */
    }
    return pos;
}
/* 1 if `ch` may start a C-like identifier (letter or '_'), else 0. */
static int
_is_alpha_underscore(char ch)
{
    if (ch == '_') {
        return 1;
    }
    return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z');
}
/* 1 if `ch` may appear inside a C-like identifier (letter, digit, '_'). */
static int
_is_alnum_underscore(char ch)
{
    if (ch >= '0' && ch <= '9') {
        return 1;
    }
    return _is_alpha_underscore(ch);
}
/*
* Convert a string into a number
*/
static npy_intp
_get_size(const char* str)
{
char *stop;
npy_longlong size = NumPyOS_strtoll(str, &stop, 10);
if (stop == str || _is_alpha_underscore(*stop)) {
/* not a well formed number */
return -1;
}
if (size >= NPY_MAX_INTP || size <= NPY_MIN_INTP) {
/* len(str) too long */
return -1;
}
return size;
}
/*
 * Return the index just past a dimension name starting at `offset`,
 * including a trailing '?' modifier when present.
 */
static int
_get_end_of_name(const char* str, int offset)
{
    int end = offset;
    for (; _is_alnum_underscore(str[end]); end++) {
        /* consume identifier characters */
    }
    if (str[end] == '?') {
        end++;
    }
    return end;
}
/*
 * Compare the identifiers starting at s1 and s2; return 1 when they are
 * the same name (same characters, both ending together), else 0.
 */
static int
_is_same_name(const char* s1, const char* s2)
{
    for (;;) {
        int w1 = _is_alnum_underscore(*s1);
        int w2 = _is_alnum_underscore(*s2);
        if (!w1 || !w2) {
            /* equal only if both names terminated at this point */
            return !w1 && !w2;
        }
        if (*s1 != *s2) {
            return 0;
        }
        s1++;
        s2++;
    }
}
/*
* Sets the following fields in the PyUFuncObject 'ufunc':
*
* Field Type Array Length
* core_enabled int (effectively bool) N/A
* core_num_dim_ix int N/A
* core_dim_flags npy_uint32 * core_num_dim_ix
* core_dim_sizes npy_intp * core_num_dim_ix
* core_num_dims int * nargs (i.e. nin+nout)
* core_offsets int * nargs
* core_dim_ixs int * sum(core_num_dims)
* core_signature char * strlen(signature) + 1
*
* The function assumes that the values that are arrays have not
* been set already, and sets these pointers to memory allocated
* with PyArray_malloc. These are freed when the ufunc dealloc
* method is called.
*
* Returns 0 unless an error occurred.
*/
static int
_parse_signature(PyUFuncObject *ufunc, const char *signature)
{
    size_t len;
    char const **var_names;       /* start pointer of each distinct dim name */
    int nd = 0;             /* number of dimension of the current argument */
    int cur_arg = 0;        /* index into core_num_dims&core_offsets */
    int cur_core_dim = 0;   /* index into core_dim_ixs */
    int i = 0;              /* current parse position in `signature` */
    char *parse_error = NULL;

    if (signature == NULL) {
        PyErr_SetString(PyExc_RuntimeError,
                        "_parse_signature with NULL signature");
        return -1;
    }
    len = strlen(signature);
    ufunc->core_signature = PyArray_malloc(sizeof(char) * (len+1));
    /* NOTE(review): allocation failure here is silently tolerated — the
     * ufunc then simply has no stored signature string; confirm intended. */
    if (ufunc->core_signature) {
        strcpy(ufunc->core_signature, signature);
    }
    /* Allocate sufficient memory to store pointers to all dimension names */
    var_names = PyArray_malloc(sizeof(char const*) * len);
    if (var_names == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    ufunc->core_enabled = 1;
    ufunc->core_num_dim_ix = 0;
    ufunc->core_num_dims = PyArray_malloc(sizeof(int) * ufunc->nargs);
    ufunc->core_offsets = PyArray_malloc(sizeof(int) * ufunc->nargs);
    /* The next three items will be shrunk later */
    ufunc->core_dim_ixs = PyArray_malloc(sizeof(int) * len);
    ufunc->core_dim_sizes = PyArray_malloc(sizeof(npy_intp) * len);
    ufunc->core_dim_flags = PyArray_malloc(sizeof(npy_uint32) * len);

    if (ufunc->core_num_dims == NULL || ufunc->core_dim_ixs == NULL ||
        ufunc->core_offsets == NULL ||
        ufunc->core_dim_sizes == NULL ||
        ufunc->core_dim_flags == NULL) {
        PyErr_NoMemory();
        goto fail;
    }
    for (size_t j = 0; j < len; j++) {
        ufunc->core_dim_flags[j] = 0;
    }

    i = _next_non_white_space(signature, 0);
    while (signature[i] != '\0') {
        /* loop over input/output arguments */
        if (cur_arg == ufunc->nin) {
            /* expect "->" separating inputs from outputs */
            if (signature[i] != '-' || signature[i+1] != '>') {
                parse_error = "expect '->'";
                goto fail;
            }
            i = _next_non_white_space(signature, i + 2);
        }

        /*
         * parse core dimensions of one argument,
         * e.g. "()", "(i)", or "(i,j)"
         */
        if (signature[i] != '(') {
            parse_error = "expect '('";
            goto fail;
        }
        i = _next_non_white_space(signature, i + 1);
        while (signature[i] != ')') {
            /* loop over core dimensions */
            int ix, i_end;
            npy_intp frozen_size;
            npy_bool can_ignore;

            if (signature[i] == '\0') {
                parse_error = "unexpected end of signature string";
                goto fail;
            }
            /*
             * Is this a variable or a fixed size dimension?
             */
            if (_is_alpha_underscore(signature[i])) {
                frozen_size = -1;   /* named -> size inferred at call time */
            }
            else {
                frozen_size = (npy_intp)_get_size(signature + i);
                if (frozen_size <= 0) {
                    parse_error = "expect dimension name or non-zero frozen size";
                    goto fail;
                }
            }
            /* Is this dimension flexible (may be absent, marked by '?')? */
            i_end = _get_end_of_name(signature, i);
            can_ignore = (i_end > 0 && signature[i_end - 1] == '?');
            /*
             * Determine whether we already saw this dimension name,
             * get its index, and set its properties
             */
            for(ix = 0; ix < ufunc->core_num_dim_ix; ix++) {
                if (frozen_size > 0 ?
                    frozen_size == ufunc->core_dim_sizes[ix] :
                    _is_same_name(signature + i, var_names[ix])) {
                    break;
                }
            }
            /*
             * If a new dimension, store its properties; if old, check consistency.
             */
            if (ix == ufunc->core_num_dim_ix) {
                ufunc->core_num_dim_ix++;
                var_names[ix] = signature + i;
                ufunc->core_dim_sizes[ix] = frozen_size;
                if (frozen_size < 0) {
                    ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_SIZE_INFERRED;
                }
                if (can_ignore) {
                    ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_CAN_IGNORE;
                }
            } else {
                /* '?' usage must be consistent across all occurrences */
                if (can_ignore && !(ufunc->core_dim_flags[ix] &
                                    UFUNC_CORE_DIM_CAN_IGNORE)) {
                    parse_error = "? cannot be used, name already seen without ?";
                    goto fail;
                }
                if (!can_ignore && (ufunc->core_dim_flags[ix] &
                                    UFUNC_CORE_DIM_CAN_IGNORE)) {
                    parse_error = "? must be used, name already seen with ?";
                    goto fail;
                }
            }
            ufunc->core_dim_ixs[cur_core_dim] = ix;
            cur_core_dim++;
            nd++;
            i = _next_non_white_space(signature, i_end);
            if (signature[i] != ',' && signature[i] != ')') {
                parse_error = "expect ',' or ')'";
                goto fail;
            }
            if (signature[i] == ',')
            {
                i = _next_non_white_space(signature, i + 1);
                if (signature[i] == ')') {
                    parse_error = "',' must not be followed by ')'";
                    goto fail;
                }
            }
        }
        /* one argument finished: record its dim count and offset */
        ufunc->core_num_dims[cur_arg] = nd;
        ufunc->core_offsets[cur_arg] = cur_core_dim-nd;
        cur_arg++;
        nd = 0;

        i = _next_non_white_space(signature, i + 1);
        if (cur_arg != ufunc->nin && cur_arg != ufunc->nargs) {
            /*
             * The list of input arguments (or output arguments) was
             * only read partially
             */
            if (signature[i] != ',') {
                parse_error = "expect ','";
                goto fail;
            }
            i = _next_non_white_space(signature, i + 1);
        }
    }
    if (cur_arg != ufunc->nargs) {
        parse_error = "incomplete signature: not all arguments found";
        goto fail;
    }
    /* Shrink the over-allocated per-dimension arrays to their final size.
     * NOTE(review): realloc results are not NULL-checked; shrinking should
     * not fail in practice, but confirm this is acceptable. */
    ufunc->core_dim_ixs = PyArray_realloc(ufunc->core_dim_ixs,
                                          sizeof(int) * cur_core_dim);
    ufunc->core_dim_sizes = PyArray_realloc(
            ufunc->core_dim_sizes,
            sizeof(npy_intp) * ufunc->core_num_dim_ix);
    ufunc->core_dim_flags = PyArray_realloc(
            ufunc->core_dim_flags,
            sizeof(npy_uint32) * ufunc->core_num_dim_ix);

    /* check for trivial core-signature, e.g. "(),()->()" */
    if (cur_core_dim == 0) {
        ufunc->core_enabled = 0;
    }
    PyArray_free((void*)var_names);
    return 0;

fail:
    PyArray_free((void*)var_names);
    if (parse_error) {
        PyErr_Format(PyExc_ValueError,
                     "%s at position %d in \"%s\"",
                     parse_error, i, signature);
    }
    return -1;
}
/*
 * Validate one user-supplied `out=` entry: None is accepted (and leaves
 * `*store` untouched), a writeable ndarray is stored into `*store` with a
 * new reference. Any other object raises TypeError. Returns 0 on success,
 * -1 with an exception set on failure.
 */
static int
_set_out_array(PyObject *obj, PyArrayObject **store)
{
    if (obj == Py_None) {
        /* None: no explicit output for this operand */
        return 0;
    }
    if (obj == Py_Ellipsis) {
        /* `...` is only valid as the whole out argument, not per operand */
        PyErr_SetString(PyExc_TypeError,
                "must use `...` as `out=...` and not per-operand/in a tuple");
        return -1;
    }
    if (!PyArray_Check(obj)) {
        PyErr_SetString(PyExc_TypeError, "return arrays must be of ArrayType");
        return -1;
    }
    if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
                                    "output array") < 0) {
        return -1;
    }
    Py_INCREF(obj);
    *store = (PyArrayObject *)obj;
    return 0;
}
/********* GENERIC UFUNC USING ITERATOR *********/
/*
* Produce a name for the ufunc, if one is not already set
* This is used in the PyUFunc_handlefperr machinery, and in error messages
*/
NPY_NO_EXPORT const char*
ufunc_get_name_cstr(PyUFuncObject *ufunc) {
    /* Fall back to a placeholder when the ufunc carries no name. */
    if (ufunc->name != NULL) {
        return ufunc->name;
    }
    return "<unnamed ufunc>";
}
/*
* Converters for use in parsing of keywords arguments.
*/
/* Keyword converter: `subok` must be a Python bool. */
static int
_subok_converter(PyObject *obj, npy_bool *subok)
{
    if (!PyBool_Check(obj)) {
        PyErr_SetString(PyExc_TypeError,
                        "'subok' must be a boolean");
        return NPY_FAIL;
    }
    *subok = (obj == Py_True);
    return NPY_SUCCEED;
}
/* Keyword converter: `keepdims` must be a Python bool. */
static int
_keepdims_converter(PyObject *obj, int *keepdims)
{
    if (!PyBool_Check(obj)) {
        PyErr_SetString(PyExc_TypeError,
                        "'keepdims' must be a boolean");
        return NPY_FAIL;
    }
    *keepdims = (obj == Py_True);
    return NPY_SUCCEED;
}
/*
 * Keyword converter: turn the `where=` argument into a boolean ndarray
 * stored in `*wheremask`. `where=True` is treated as "no mask" so that
 * True can be documented as the default.
 */
static int
_wheremask_converter(PyObject *obj, PyArrayObject **wheremask)
{
    if (obj == Py_True) {
        /* same as omitting `where` entirely */
        return NPY_SUCCEED;
    }
    PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL);
    if (dtype == NULL) {
        return NPY_FAIL;
    }
    /* PyArray_FromAny steals reference to dtype, even on failure */
    *wheremask = (PyArrayObject *)PyArray_FromAny(obj, dtype, 0, 0, 0, NULL);
    return (*wheremask == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
/*
* Due to the array override, do the actual parameter conversion
* only in this step. This function takes the reference objects and
* parses them into the desired values.
* This function cleans up after itself and NULLs references on error,
* however, the caller has to ensure that `out_op[0:nargs]` and `out_whermeask`
* are NULL initialized.
*/
/*
 * Convert the already-parsed ufunc call arguments into their final form:
 * input/output arrays, their DTypes, and the keyword values (order,
 * casting, subok, where, keepdims).  Done after the __array_ufunc__
 * override check, which needs the raw objects.
 *
 * On error this NULLs `out_op[0:nargs]` and `*out_wheremask`; the caller
 * must have NULL-initialized them beforehand.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int
convert_ufunc_arguments(PyUFuncObject *ufunc,
        ufunc_full_args full_args, PyArrayObject *out_op[],
        PyArray_DTypeMeta *out_op_DTypes[],
        npy_bool *force_legacy_promotion,
        npy_bool *promoting_pyscalars,
        PyObject *order_obj, NPY_ORDER *out_order,
        PyObject *casting_obj, NPY_CASTING *out_casting,
        PyObject *subok_obj, npy_bool *out_subok,
        PyObject *where_obj, PyArrayObject **out_wheremask, /* PyArray of bool */
        PyObject *keepdims_obj, int *out_keepdims)
{
    int nin = ufunc->nin;
    int nout = ufunc->nout;
    int nop = ufunc->nargs;
    PyObject *obj;

    /* Convert and fill in input arguments */
    npy_bool all_scalar = NPY_TRUE;
    npy_bool any_scalar = NPY_FALSE;
    *force_legacy_promotion = NPY_FALSE;
    *promoting_pyscalars = NPY_FALSE;
    for (int i = 0; i < nin; i++) {
        obj = PyTuple_GET_ITEM(full_args.in, i);

        if (PyArray_Check(obj)) {
            out_op[i] = (PyArrayObject *)obj;
            Py_INCREF(out_op[i]);
        }
        else {
            /* Convert the input to an array and check for special cases */
            out_op[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, NULL);
            if (out_op[i] == NULL) {
                goto fail;
            }
        }
        out_op_DTypes[i] = NPY_DTYPE(PyArray_DESCR(out_op[i]));
        Py_INCREF(out_op_DTypes[i]);

        if (nin == 1) {
            /*
             * TODO: If nin == 1 we don't promote! This has exactly the effect
             *       that right now integers can still go to object/uint64 and
             *       their behavior is thus unchanged for unary ufuncs (like
             *       isnan). This is not ideal, but pragmatic...
             *       We should eventually have special loops for isnan and once
             *       we do, we may just deprecate all remaining ones (e.g.
             *       `negative(2**100)` not working as it is an object.)
             *
             *       This is issue is part of the NEP 50 adoption.
             */
            break;
        }

        if (PyArray_NDIM(out_op[i]) == 0) {
            any_scalar = NPY_TRUE;
        }
        else {
            all_scalar = NPY_FALSE;
            continue;
        }

        /*
         * Handle the "weak" Python scalars/literals. We use a special DType
         * for these.
         * Further, we mark the operation array with a special flag to indicate
         * this. This is because the legacy dtype resolution makes use of
         * `np.can_cast(operand, dtype)`. The flag is local to this use, but
         * necessary to propagate the information to the legacy type resolution.
         */
        if (npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) {
            if (PyArray_FLAGS(out_op[i]) & NPY_ARRAY_WAS_PYTHON_INT
                    && PyArray_TYPE(out_op[i]) != NPY_LONG) {
                /*
                 * When `np.array(integer)` is not the default integer (mainly
                 * object dtype), this confuses many type resolvers. Simply
                 * forcing a default integer array is unfortunately easiest.
                 * In this disables the optional NEP 50 warnings, but in
                 * practice when this happens we should _usually_ pick the
                 * default integer loop and that raises an error.
                 * (An exception is `float64(1.) + 10**100` which silently
                 * will give a float64 result rather than a Python float.)
                 *
                 * TODO: Just like the general dual NEP 50/legacy promotion
                 * support this is meant as a temporary hack for NumPy 1.25.
                 */
                Py_INCREF(npy_static_pydata.zero_pyint_like_arr);
                Py_SETREF(out_op[i],
                    (PyArrayObject *)npy_static_pydata.zero_pyint_like_arr);
            }
            *promoting_pyscalars = NPY_TRUE;
        }
    }
    if ((!all_scalar && any_scalar)) {
        *force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL);
    }

    /* Convert and fill in output arguments */
    memset(out_op_DTypes + nin, 0, nout * sizeof(*out_op_DTypes));
    if (full_args.out != NULL) {
        for (int i = 0; i < nout; i++) {
            obj = PyTuple_GET_ITEM(full_args.out, i);
            if (_set_out_array(obj, out_op + i + nin) < 0) {
                goto fail;
            }
            /*
             * BUGFIX: must inspect the output slot `out_op[i + nin]` that
             * _set_out_array just filled, not the input `out_op[i]`.  The
             * previous code recorded the *input's* DType for the output and
             * even did so when the user passed `out=None`.
             */
            if (out_op[i + nin] != NULL) {
                out_op_DTypes[i + nin] = NPY_DTYPE(PyArray_DESCR(out_op[i + nin]));
                Py_INCREF(out_op_DTypes[i + nin]);
            }
        }
    }

    /*
     * Convert most arguments manually here, since it is easier to handle
     * the ufunc override if we first parse only to objects.
     */
    if (where_obj && !_wheremask_converter(where_obj, out_wheremask)) {
        goto fail;
    }
    if (keepdims_obj && !_keepdims_converter(keepdims_obj, out_keepdims)) {
        goto fail;
    }
    if (casting_obj && !PyArray_CastingConverter(casting_obj, out_casting)) {
        goto fail;
    }
    if (order_obj && !PyArray_OrderConverter(order_obj, out_order)) {
        goto fail;
    }
    if (subok_obj && !_subok_converter(subok_obj, out_subok)) {
        goto fail;
    }
    return 0;

fail:
    if (out_wheremask != NULL) {
        Py_XSETREF(*out_wheremask, NULL);
    }
    for (int i = 0; i < nop; i++) {
        Py_XSETREF(out_op[i], NULL);
    }
    return -1;
}
/*
* This checks whether a trivial loop is ok,
* making copies of scalar and one dimensional operands if that will
* help.
*
* Returns 1 if a trivial loop is ok, 0 if it is not, and
* -1 if there is an error.
*/
static int
check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
        PyArrayObject **op, PyArray_Descr **dtypes,
        NPY_CASTING casting, npy_intp buffersize)
{
    /* Some methods (logical funcs) may force-cast their inputs to bool */
    int force_cast_input = ufuncimpl->flags & _NPY_METH_FORCE_CAST_INPUTS;
    int i, nin = ufuncimpl->nin, nop = nin + ufuncimpl->nout;

    for (i = 0; i < nop; ++i) {
        /*
         * If the dtype doesn't match, or the array isn't aligned,
         * indicate that the trivial loop can't be done.
         */
        if (op[i] == NULL) {
            continue;   /* unallocated output; decided elsewhere */
        }
        int must_copy = !PyArray_ISALIGNED(op[i]);

        if (dtypes[i] != PyArray_DESCR(op[i])) {
            npy_intp view_offset;
            npy_intp is_safe = PyArray_SafeCast(
                    PyArray_DESCR(op[i]), dtypes[i], &view_offset, casting, 0);
            if (is_safe < 0 && PyErr_Occurred()) {
                /* A proper error during a cast check, should be rare */
                return -1;
            }
            if (view_offset != 0) {
                /* NOTE: Could possibly implement non-zero view offsets */
                must_copy = 1;
            }

            if (force_cast_input && i < nin) {
                /*
                 * ArrayMethod flagged to ignore casting (logical funcs
                 * can force cast to bool)
                 */
            }
            else if (is_safe != 1) {
                return 0;  /* there was a cast error or cast is not safe enough */
            }
        }
        if (must_copy) {
            /*
             * If op[j] is a scalar or small one dimensional
             * array input, make a copy to keep the opportunity
             * for a trivial loop.  Outputs are not copied here.
             */
            if (i < nin && (PyArray_NDIM(op[i]) == 0
                            || (PyArray_NDIM(op[i]) == 1
                                && PyArray_DIM(op[i], 0) <= buffersize))) {
                PyArrayObject *tmp;
                Py_INCREF(dtypes[i]);
                tmp = (PyArrayObject *)PyArray_CastToType(op[i], dtypes[i], 0);
                if (tmp == NULL) {
                    return -1;
                }
                /* Replace the operand by its aligned/cast copy in place */
                Py_DECREF(op[i]);
                op[i] = tmp;
            }
            else {
                return 0;   /* too large / an output: no trivial loop */
            }
        }
    }

    return 1;
}
/*
* Check whether a trivial loop is possible and call the innerloop if it is.
* A trivial loop is defined as one where a single strided inner-loop call
* is possible.
*
* This function only supports a single output (due to the overlap check).
* It always accepts 0-D arrays and will broadcast them. The function
* cannot broadcast any other array (as it requires a single stride).
* The function accepts all 1-D arrays, and N-D arrays that are either all
* C- or all F-contiguous.
* NOTE: Broadcast outputs are implicitly rejected in the overlap detection.
*
* Returns -2 if a trivial loop is not possible, 0 on success and -1 on error.
*/
static int
try_trivial_single_output_loop(PyArrayMethod_Context *context,
        PyArrayObject *op[], NPY_ORDER order,
        int errormask)
{
    int nin = context->method->nin;
    int nop = nin + 1;
    assert(context->method->nout == 1);

    /* The order of all N-D contiguous operands, can be fixed by `order` */
    int operation_order = 0;
    if (order == NPY_CORDER) {
        operation_order = NPY_ARRAY_C_CONTIGUOUS;
    }
    else if (order == NPY_FORTRANORDER) {
        operation_order = NPY_ARRAY_F_CONTIGUOUS;
    }

    int operation_ndim = 0;
    npy_intp *operation_shape = NULL;
    npy_intp fixed_strides[NPY_MAXARGS];

    /* Pass 1: verify all operands are compatible with one strided call */
    for (int iop = 0; iop < nop; iop++) {
        if (op[iop] == NULL) {
            /* The out argument may be NULL (and only that one); fill later */
            assert(iop == nin);
            continue;
        }

        int op_ndim = PyArray_NDIM(op[iop]);

        /* Special case 0-D since we can handle broadcasting using a 0-stride */
        if (op_ndim == 0 && iop < nin) {
            fixed_strides[iop] = 0;
            continue;
        }

        /* First non 0-D op: fix dimensions, shape (order is fixed later) */
        if (operation_ndim == 0) {
            operation_ndim = op_ndim;
            operation_shape = PyArray_SHAPE(op[iop]);
        }
        else if (op_ndim != operation_ndim) {
            return -2;  /* dimension mismatch (except 0-d input ops) */
        }
        else if (!PyArray_CompareLists(
                operation_shape, PyArray_DIMS(op[iop]), op_ndim)) {
            return -2;  /* shape mismatch */
        }

        if (op_ndim == 1) {
            /* 1-D arrays may have any stride */
            fixed_strides[iop] = PyArray_STRIDES(op[iop])[0];
        }
        else {
            fixed_strides[iop] = PyArray_ITEMSIZE(op[iop]);  /* contiguous */

            /* This op must match the operation order (and be contiguous) */
            int op_order = (PyArray_FLAGS(op[iop]) &
                            (NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS));
            if (op_order == 0) {
                return -2;  /* N-dimensional op must be contiguous */
            }
            else if (operation_order == 0) {
                operation_order = op_order;  /* op fixes order */
            }
            else if (operation_order != op_order) {
                return -2;
            }
        }
    }

    if (op[nin] == NULL) {
        /* Allocate the missing output with the resolved descriptor/order */
        Py_INCREF(context->descriptors[nin]);
        op[nin] = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type,
                context->descriptors[nin], operation_ndim, operation_shape,
                NULL, NULL, operation_order==NPY_ARRAY_F_CONTIGUOUS, NULL);
        if (op[nin] == NULL) {
            return -1;
        }
        fixed_strides[nin] = context->descriptors[nin]->elsize;
    }
    else {
        /* If any input overlaps with the output, we use the full path. */
        for (int iop = 0; iop < nin; iop++) {
            if (!PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(
                    op[iop], op[nin],
                    PyArray_TRIVIALLY_ITERABLE_OP_READ,
                    PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) {
                return -2;
            }
        }
        /* Check self-overlap (non 1-D are contiguous, perfect overlap is OK) */
        if (operation_ndim == 1 &&
                PyArray_STRIDES(op[nin])[0] < PyArray_ITEMSIZE(op[nin]) &&
                PyArray_STRIDES(op[nin])[0] != 0) {
            return -2;
        }
    }

    /*
     * We can use the trivial (single inner-loop call) optimization
     * and `fixed_strides` holds the strides for that call.
     */
    char *data[NPY_MAXARGS];
    npy_intp count = PyArray_MultiplyList(operation_shape, operation_ndim);
    if (count == 0) {
        /* Nothing to do */
        return 0;
    }
    NPY_BEGIN_THREADS_DEF;

    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;
    NPY_ARRAYMETHOD_FLAGS flags = 0;
    if (context->method->get_strided_loop(context,
            1, 0, fixed_strides,
            &strided_loop, &auxdata, &flags) < 0) {
        return -1;
    }
    for (int iop=0; iop < nop; iop++) {
        data[iop] = PyArray_BYTES(op[iop]);
    }

    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Clear FP status so post-call check only sees this loop's errors */
        npy_clear_floatstatus_barrier((char *)context);
    }
    if (!(flags & NPY_METH_REQUIRES_PYAPI)) {
        NPY_BEGIN_THREADS_THRESHOLDED(count);
    }

    int res = strided_loop(context, data, &count, fixed_strides, auxdata);

    NPY_END_THREADS;
    NPY_AUXDATA_FREE(auxdata);

    /*
     * An error should only be possible if `res != 0` is already set.
     * But this is not strictly correct for old-style ufuncs (e.g. `power`
     * released the GIL but manually set an Exception).
     */
    if (PyErr_Occurred()) {
        res = -1;
    }

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        const char *name = ufunc_get_name_cstr((PyUFuncObject *)context->caller);
        res = _check_ufunc_fperr(errormask, name);
    }
    return res;
}
/*
* Check casting: It would be nice to just move this into the iterator
* or pass in the full cast information. But this can special case
* the logical functions and prints a better error message.
*/
/*
 * Validate the casting of all operand descriptors for one call.
 * Specializing this here (rather than in the iterator) keeps the
 * logical-function special case and the better error message.
 */
static inline int
validate_casting(PyArrayMethodObject *method, PyUFuncObject *ufunc,
        PyArrayObject *ops[], PyArray_Descr *const descriptors_const[],
        NPY_CASTING casting)
{
    /* Cast away const to not change old public `PyUFunc_ValidateCasting`. */
    PyArray_Descr **descriptors = (PyArray_Descr **)descriptors_const;

    if (method->resolve_descriptors == &wrapped_legacy_resolve_descriptors) {
        /*
         * In this case the legacy type resolution was definitely called
         * and we do not need to check (astropy/pyerfa relied on this).
         */
        return 0;
    }

    int check;
    if (method->flags & _NPY_METH_FORCE_CAST_INPUTS) {
        /* Inputs may be force-cast; only the outputs are validated */
        check = PyUFunc_ValidateOutCasting(ufunc, casting, ops, descriptors);
    }
    else {
        check = PyUFunc_ValidateCasting(ufunc, casting, ops, descriptors);
    }
    return (check < 0) ? -1 : 0;
}
/*
* The ufunc loop implementation for both normal ufunc calls and masked calls
* when the iterator has to be used.
*
* See `PyUFunc_GenericFunctionInternal` for more information (where this is
* called from).
*/
static int
execute_ufunc_loop(PyArrayMethod_Context *context, int masked,
        PyArrayObject **op, NPY_ORDER order, npy_intp buffersize,
        NPY_CASTING casting,
        npy_uint32 *op_flags, int errormask)
{
    PyUFuncObject *ufunc = (PyUFuncObject *)context->caller;
    int nin = context->method->nin, nout = context->method->nout;
    int nop = nin + nout;

    if (validate_casting(context->method,
            ufunc, op, context->descriptors, casting) < 0) {
        return -1;
    }

    if (masked) {
        /* op[nop] is the where= boolean mask operand */
        assert(PyArray_TYPE(op[nop]) == NPY_BOOL);

        /*
         * NOTE: In the masked version, we consider the output read-write,
         *       this gives a best-effort of preserving the input, but does
         *       not always work. It could allow the operand to be copied
         *       due to copy-if-overlap, but only if it was passed in.
         */
        for (int i = nin; i < nop; ++i) {
            op_flags[i] |= (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY);
        }
        op_flags[nop] = NPY_ITER_READONLY | NPY_ITER_ARRAYMASK;  /* mask */
    }

    NPY_UF_DBG_PRINT("Making iterator\n");

    npy_uint32 iter_flags = ufunc->iter_flags |
                 NPY_ITER_EXTERNAL_LOOP |
                 NPY_ITER_REFS_OK |
                 NPY_ITER_ZEROSIZE_OK |
                 NPY_ITER_BUFFERED |
                 NPY_ITER_GROWINNER |
                 NPY_ITER_DELAY_BUFALLOC |
                 NPY_ITER_COPY_IF_OVERLAP;

    /*
     * Allocate the iterator.  Because the types of the inputs
     * were already checked, we use the casting rule 'unsafe' which
     * is faster to calculate.
     */
    NpyIter *iter = NpyIter_AdvancedNew(nop + masked, op,
                        iter_flags,
                        order, NPY_UNSAFE_CASTING,
                        op_flags, (PyArray_Descr **)context->descriptors,
                        -1, NULL, NULL, buffersize);
    if (iter == NULL) {
        return -1;
    }

    NPY_UF_DBG_PRINT("Made iterator\n");

    /* Set newly allocated arrays as outputs */
    PyArrayObject **op_it = NpyIter_GetOperandArray(iter);
    for (int i = 0; i < nout; ++i) {
        if (op[nin + i] == NULL) {
            op[nin + i] = op_it[nin + i];
            Py_INCREF(op[nin + i]);
        }
    }

    /* Only do the loop if the iteration size is non-zero */
    npy_intp full_size = NpyIter_GetIterSize(iter);
    if (full_size == 0) {
        if (!NpyIter_Deallocate(iter)) {
            return -1;
        }
        return 0;
    }

    /*
     * Get the inner loop, with the possibility of specialization
     * based on the fixed strides.
     */
    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;
    npy_intp fixed_strides[NPY_MAXARGS];
    NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
    NPY_ARRAYMETHOD_FLAGS flags = 0;
    if (masked) {
        if (PyArrayMethod_GetMaskedStridedLoop(context,
                1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
            NpyIter_Deallocate(iter);
            return -1;
        }
    }
    else {
        if (context->method->get_strided_loop(context,
                1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
            NpyIter_Deallocate(iter);
            return -1;
        }
    }

    /* Get the variables needed for the loop */
    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
    if (iternext == NULL) {
        NPY_AUXDATA_FREE(auxdata);
        NpyIter_Deallocate(iter);
        return -1;
    }
    char **dataptr = NpyIter_GetDataPtrArray(iter);
    npy_intp *strides = NpyIter_GetInnerStrideArray(iter);
    npy_intp *countptr = NpyIter_GetInnerLoopSizePtr(iter);

    NPY_BEGIN_THREADS_DEF;

    flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter));

    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Clear FP status so post-loop check only sees this loop's errors */
        npy_clear_floatstatus_barrier((char *)context);
    }
    if (!(flags & NPY_METH_REQUIRES_PYAPI)) {
        NPY_BEGIN_THREADS_THRESHOLDED(full_size);
    }

    /* The reset may copy the first buffer chunk, which could cause FPEs */
    if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) {
        NPY_AUXDATA_FREE(auxdata);
        NpyIter_Deallocate(iter);
        return -1;
    }

    NPY_UF_DBG_PRINT("Actual inner loop:\n");
    /* Execute the loop */
    int res;
    do {
        NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*countptr);
        res = strided_loop(context, dataptr, countptr, strides, auxdata);
    } while (res == 0 && iternext(iter));

    NPY_END_THREADS;
    NPY_AUXDATA_FREE(auxdata);

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        const char *name = ufunc_get_name_cstr((PyUFuncObject *)context->caller);
        res = _check_ufunc_fperr(errormask, name);
    }

    if (!NpyIter_Deallocate(iter)) {
        return -1;
    }
    return res;
}
/*
* Validate that operands have enough dimensions, accounting for
* possible flexible dimensions that may be absent.
*/
static int
_validate_num_dims(PyUFuncObject *ufunc, PyArrayObject **op,
                   npy_uint32 *core_dim_flags,
                   int *op_core_num_dims) {
    int i, j;
    int nin = ufunc->nin;
    int nop = ufunc->nargs;

    for (i = 0; i < nop; i++) {
        if (op[i] != NULL) {
            int op_ndim = PyArray_NDIM(op[i]);

            if (op_ndim < op_core_num_dims[i]) {
                int core_offset = ufunc->core_offsets[i];
                /* We've too few, but some dimensions might be flexible */
                for (j = core_offset;
                     j < core_offset + ufunc->core_num_dims[i]; j++) {
                    int core_dim_index = ufunc->core_dim_ixs[j];
                    if ((core_dim_flags[core_dim_index] &
                         UFUNC_CORE_DIM_CAN_IGNORE)) {
                        int i1, j1, k;
                        /*
                         * Found a dimension that can be ignored. Flag that
                         * it is missing, and unflag that it can be ignored,
                         * since we are doing so already.
                         */
                        core_dim_flags[core_dim_index] |= UFUNC_CORE_DIM_MISSING;
                        core_dim_flags[core_dim_index] ^= UFUNC_CORE_DIM_CAN_IGNORE;
                        /*
                         * Reduce the number of core dimensions for all
                         * operands that use this one (including ours),
                         * and check whether we're now OK.
                         */
                        for (i1 = 0, k=0; i1 < nop; i1++) {
                            for (j1 = 0; j1 < ufunc->core_num_dims[i1]; j1++) {
                                /* k walks core_dim_ixs across all operands */
                                if (ufunc->core_dim_ixs[k++] == core_dim_index) {
                                    op_core_num_dims[i1]--;
                                }
                            }
                        }
                        if (op_ndim == op_core_num_dims[i]) {
                            break;  /* this operand is now satisfied */
                        }
                    }
                }
                if (op_ndim < op_core_num_dims[i]) {
                    /* still too few dimensions even after dropping '?' dims */
                    PyErr_Format(PyExc_ValueError,
                         "%s: %s operand %d does not have enough "
                         "dimensions (has %d, gufunc core with "
                         "signature %s requires %d)",
                         ufunc_get_name_cstr(ufunc),
                         i < nin ? "Input" : "Output",
                         i < nin ? i : i - nin, PyArray_NDIM(op[i]),
                         ufunc->core_signature, op_core_num_dims[i]);
                    return -1;
                }
            }
        }
    }
    return 0;
}
/*
* Check whether any of the outputs of a gufunc has core dimensions.
*/
/*
 * Report whether at least one output operand of the gufunc carries
 * core dimensions (1 if so, 0 otherwise).
 */
static int
_has_output_coredims(PyUFuncObject *ufunc) {
    int iarg;
    int nargs = ufunc->nin + ufunc->nout;

    for (iarg = ufunc->nin; iarg < nargs; ++iarg) {
        if (ufunc->core_num_dims[iarg] > 0) {
            return 1;
        }
    }
    return 0;
}
/*
* Check whether the gufunc can be used with axis, i.e., that there is only
* a single, shared core dimension (which means that operands either have
* that dimension, or have no core dimensions). Returns 0 if all is fine,
* and sets an error and returns -1 if not.
*/
/*
 * Verify the gufunc is usable with the `axis` argument, which requires the
 * signature to involve exactly one distinct, shared core dimension.
 * Returns 0 when usable; otherwise sets a TypeError and returns -1.
 */
static int
_check_axis_support(PyUFuncObject *ufunc) {
    if (ufunc->core_num_dim_ix == 1) {
        return 0;
    }
    PyErr_Format(PyExc_TypeError,
                 "%s: axis can only be used with a single shared core "
                 "dimension, not with the %d distinct ones implied by "
                 "signature %s.",
                 ufunc_get_name_cstr(ufunc),
                 ufunc->core_num_dim_ix,
                 ufunc->core_signature);
    return -1;
}
/*
* Check whether the gufunc can be used with keepdims, i.e., that all its
 * input arguments have the same number of core dimensions, and all output
* arguments have no core dimensions. Returns 0 if all is fine, and sets
* an error and returns -1 if not.
*/
/*
 * Verify the gufunc is usable with `keepdims`: every input must share the
 * core-dimension count of input 0, and every output must have none.
 * Returns 0 when usable; otherwise sets a TypeError naming the first
 * offending argument and returns -1.
 */
static int
_check_keepdims_support(PyUFuncObject *ufunc) {
    int iarg;
    int nin = ufunc->nin, nout = ufunc->nout;
    int input_core_dims = ufunc->core_num_dims[0];

    for (iarg = 1; iarg < nin + nout; iarg++) {
        int expected = (iarg < nin) ? input_core_dims : 0;
        if (ufunc->core_num_dims[iarg] == expected) {
            continue;
        }
        PyErr_Format(PyExc_TypeError,
            "%s does not support keepdims: its signature %s requires "
            "%s %d to have %d core dimensions, but keepdims can only "
            "be used when all inputs have the same number of core "
            "dimensions and all outputs have no core dimensions.",
            ufunc_get_name_cstr(ufunc),
            ufunc->core_signature,
            iarg < nin ? "input" : "output",
            iarg < nin ? iarg : iarg - nin,
            ufunc->core_num_dims[iarg]);
        return -1;
    }
    return 0;
}
/*
* Interpret a possible axes keyword argument, using it to fill the remap_axis
* array which maps default to actual axes for each operand, indexed as
 * remap_axis[iop][iaxis]. The default axis order has first all broadcast
* axes and then the core axes the gufunc operates on.
*
* Returns 0 on success, and -1 on failure
*/
/*
 * Parse the `axes` keyword argument (a list with one tuple of axis numbers
 * per operand; output entries may be omitted when no output has core
 * dimensions) into remap_axis[iop][iaxis].  remap_axis[iop] is set to NULL
 * when operand iop needs no remapping.  Returns 0 on success, -1 on error
 * (exception set).
 */
static int
_parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes,
                PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
    int nin = ufunc->nin;
    int nop = ufunc->nargs;
    int iop, list_size;

    if (!PyList_Check(axes)) {
        PyErr_SetString(PyExc_TypeError, "axes should be a list.");
        return -1;
    }
    list_size = PyList_Size(axes);
    if (list_size != nop) {
        /* short list is only OK when no output has core dimensions */
        if (list_size != nin || _has_output_coredims(ufunc)) {
            PyErr_Format(PyExc_ValueError,
                         "axes should be a list with an entry for all "
                         "%d inputs and outputs; entries for outputs can only "
                         "be omitted if none of them has core axes.",
                         nop);
            return -1;
        }
        /* omitted outputs get identity mapping */
        for (iop = nin; iop < nop; iop++) {
            remap_axis[iop] = NULL;
        }
    }
    for (iop = 0; iop < list_size; ++iop) {
        int op_ndim, op_ncore, op_nbroadcast;
        int have_seen_axis[NPY_MAXDIMS] = {0};
        PyObject *op_axes_tuple, *axis_item;
        int axis, op_axis;

        op_ncore = op_core_num_dims[iop];
        if (op[iop] != NULL) {
            op_ndim = PyArray_NDIM(op[iop]);
            op_nbroadcast = op_ndim - op_ncore;
        }
        else {
            /* operand to be allocated: assume full broadcast shape */
            op_nbroadcast = broadcast_ndim;
            op_ndim = broadcast_ndim + op_ncore;
        }
        /*
         * Get axes tuple for operand. If not a tuple already, make it one if
         * there is only one axis (its content is checked later).
         */
        op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed
        if (PyTuple_Check(op_axes_tuple)) {
            if (PyTuple_Size(op_axes_tuple) != op_ncore) {
                /* must have been a tuple with too many entries. */
                PyErr_Format(npy_static_pydata.AxisError,
                             "%s: operand %d has %d core dimensions, "
                             "but %zd dimensions are specified by axes tuple.",
                             ufunc_get_name_cstr(ufunc), iop, op_ncore,
                             PyTuple_Size(op_axes_tuple));
                return -1;
            }
            /* take our own reference; released below on all paths */
            Py_INCREF(op_axes_tuple);
        }
        else if (op_ncore == 1) {
            /* single int entry: wrap it into a 1-tuple (new reference) */
            op_axes_tuple = PyTuple_Pack(1, op_axes_tuple);
            if (op_axes_tuple == NULL) {
                return -1;
            }
        }
        else {
            /* If input is not an integer tell user that a tuple is needed */
            if (error_converting(PyArray_PyIntAsInt(op_axes_tuple))) {
                PyErr_Format(PyExc_TypeError,
                             "%s: axes item %d should be a tuple.",
                             ufunc_get_name_cstr(ufunc), iop);
                return -1;
            }
            /* If it is a single integer, inform user that more are needed */
            PyErr_Format(npy_static_pydata.AxisError,
                         "%s: operand %d has %d core dimensions, "
                         "but the axes item is a single integer.",
                         ufunc_get_name_cstr(ufunc), iop, op_ncore);
            return -1;
        }
        /*
         * Now create the remap, starting with the core dimensions, and then
         * adding the remaining broadcast axes that are to be iterated over.
         */
        for (axis = op_nbroadcast; axis < op_ndim; axis++) {
            axis_item = PyTuple_GET_ITEM(op_axes_tuple, axis - op_nbroadcast);
            op_axis = PyArray_PyIntAsInt(axis_item);
            if (error_converting(op_axis) ||
                (check_and_adjust_axis(&op_axis, op_ndim) < 0)) {
                Py_DECREF(op_axes_tuple);
                return -1;
            }
            if (have_seen_axis[op_axis]) {
                PyErr_Format(PyExc_ValueError,
                             "axes item %d has value %d repeated",
                             iop, op_axis);
                Py_DECREF(op_axes_tuple);
                return -1;
            }
            have_seen_axis[op_axis] = 1;
            remap_axis[iop][axis] = op_axis;
        }
        Py_DECREF(op_axes_tuple);
        /*
         * Fill the op_nbroadcast=op_ndim-op_ncore axes not yet set,
         * using have_seen_axis to skip over entries set above.
         */
        for (axis = 0, op_axis = 0; axis < op_nbroadcast; axis++) {
            while (have_seen_axis[op_axis]) {
                op_axis++;
            }
            remap_axis[iop][axis] = op_axis++;
        }
        /*
         * Check whether we are actually remapping anything. Here,
         * op_axis can only equal axis if all broadcast axes were the same
         * (i.e., the while loop above was never entered).
         */
        if (axis == op_axis) {
            while (axis < op_ndim && remap_axis[iop][axis] == axis) {
                axis++;
            }
        }
        /* identity mapping: signal "no remap" with NULL */
        if (axis == op_ndim) {
            remap_axis[iop] = NULL;
        }
    } /* end of for(iop) loop over operands */
    return 0;
}
/*
* Simplified version of the above, using axis to fill the remap_axis
* array, which maps default to actual axes for each operand, indexed as
 * remap_axis[iop][iaxis]. The default axis order has first all broadcast
* axes and then the core axes the gufunc operates on.
*
* Returns 0 on success, and -1 on failure
*/
/*
 * Parse the single-integer `axis` argument into the remap_axis array,
 * which maps default to actual axes for each operand (indexed as
 * remap_axis[iop][iaxis]).  Operands without a core dimension, and
 * operands whose core axis is already last, get remap_axis[iop] == NULL.
 *
 * Returns 0 on success, and -1 on failure (exception set).
 */
static int
_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis,
                PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
    int nop = ufunc->nargs;
    int iop;
    int axis_int = PyArray_PyIntAsInt(axis);

    if (error_converting(axis_int)) {
        return -1;
    }

    for (iop = 0; iop < nop; ++iop) {
        int ndim, target, k;

        /* _check_axis_support ensures core_num_dims is 0 or 1 */
        if (core_num_dims[iop] == 0) {
            /* no core axis, so nothing to remap for this operand */
            remap_axis[iop] = NULL;
            continue;
        }
        ndim = (op[iop] != NULL) ? PyArray_NDIM(op[iop]) : broadcast_ndim + 1;
        /* work on a copy so axis_int itself is never modified */
        target = axis_int;
        if (check_and_adjust_axis(&target, ndim) < 0) {
            return -1;
        }
        if (target == ndim - 1) {
            /* core axis is already last: identity mapping */
            remap_axis[iop] = NULL;
            continue;
        }
        /* Move the requested axis to the end, shifting the rest left. */
        remap_axis[iop][ndim - 1] = target;
        for (k = 0; k < target; k++) {
            remap_axis[iop][k] = k;
        }
        for (k = target; k < ndim - 1; k++) {
            remap_axis[iop][k] = k + 1;
        }
    } /* end of for(iop) loop over operands */
    return 0;
}
/*
 * Map a default axis number for operand `iop` to its actual axis, taking
 * any axis/axes remapping into account (identity when no remap is active,
 * i.e. remap_axis or remap_axis[iop] is NULL).
 */
#define REMAP_AXIS(iop, axis) ((remap_axis != NULL && \
                                remap_axis[iop] != NULL)? \
                               remap_axis[iop][axis] : axis)
/*
* Validate the core dimensions of all the operands, and collect all of
* the labelled core dimensions into 'core_dim_sizes'.
*
* Returns 0 on success, and -1 on failure
*
* The behavior has been changed in NumPy 1.16.0, and the following
* requirements must be fulfilled or an error will be raised:
* * Arguments, both input and output, must have at least as many
* dimensions as the corresponding number of core dimensions. In
* versions before 1.10, 1's were prepended to the shape as needed.
* * Core dimensions with same labels must have exactly matching sizes.
* In versions before 1.10, core dimensions of size 1 would broadcast
* against other core dimensions with the same label.
* * All core dimensions must have their size specified by a passed in
* input or output argument. In versions before 1.10, core dimensions in
* an output argument that were not specified in an input argument,
* and whose size could not be inferred from a passed in output
* argument, would have their size set to 1.
* * Core dimensions may be fixed, new in NumPy 1.16
*/
/*
 * Fill core_dim_sizes (indexed by core dimension label) from the operands,
 * checking that same-labelled core dimensions match exactly and that every
 * core dimension ends up with a known size.  Returns 0 on success, -1 on
 * failure (ValueError set).
 */
static int
_get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
                   const int *op_core_num_dims, npy_uint32 *core_dim_flags,
                   npy_intp *core_dim_sizes, int **remap_axis) {
    int i;
    int nin = ufunc->nin;
    int nout = ufunc->nout;
    int nop = nin + nout;

    for (i = 0; i < nop; ++i) {
        if (op[i] != NULL) {
            int idim;
            int dim_offset = ufunc->core_offsets[i];
            int core_start_dim = PyArray_NDIM(op[i]) - op_core_num_dims[i];
            int dim_delta = 0;

            /* checked before this routine gets called */
            assert(core_start_dim >= 0);

            /*
             * Make sure every core dimension exactly matches all other core
             * dimensions with the same label. Note that flexible dimensions
             * may have been removed at this point, if so, they are marked
             * with UFUNC_CORE_DIM_MISSING.
             */
            for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
                int core_index = dim_offset + idim;
                int core_dim_index = ufunc->core_dim_ixs[core_index];
                npy_intp core_dim_size = core_dim_sizes[core_dim_index];
                npy_intp op_dim_size;

                /* can only happen if flexible; dimension missing altogether */
                if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
                    op_dim_size = 1;
                    dim_delta++; /* for indexing in dimensions */
                }
                else {
                    op_dim_size = PyArray_DIM(op[i],
                        REMAP_AXIS(i, core_start_dim + idim - dim_delta));
                }
                /* negative size means "not yet seen": record it now */
                if (core_dim_sizes[core_dim_index] < 0) {
                    core_dim_sizes[core_dim_index] = op_dim_size;
                }
                else if (op_dim_size != core_dim_size) {
                    PyErr_Format(PyExc_ValueError,
                        "%s: %s operand %d has a mismatch in its "
                        "core dimension %d, with gufunc "
                        "signature %s (size %zd is different "
                        "from %zd)",
                        ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
                        i < nin ? i : i - nin, idim - dim_delta,
                        ufunc->core_signature, op_dim_size,
                        core_dim_sizes[core_dim_index]);
                    return -1;
                }
            }
        }
    }

    /* optional hook so loops may adjust/validate resolved core sizes */
    if (ufunc->process_core_dims_func != NULL) {
        int status = ufunc->process_core_dims_func(ufunc, core_dim_sizes);
        if (status != 0) {
            return -1;
        }
    }

    /*
     * Make sure no core dimension is unspecified.
     */
    for (i = nin; i < nop; ++i) {
        int idim;
        int dim_offset = ufunc->core_offsets[i];

        for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
            int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim];

            /* check all cases where the size has not yet been set */
            if (core_dim_sizes[core_dim_index] < 0) {
                /*
                 * Oops, this dimension was never specified
                 * (can only happen if output op not given)
                 */
                PyErr_Format(PyExc_ValueError,
                    "%s: Output operand %d has core dimension %d "
                    "unspecified, with gufunc signature %s",
                    ufunc_get_name_cstr(ufunc), i - nin, idim,
                    ufunc->core_signature);
                return -1;
            }
        }
    }
    return 0;
}
/*
* Returns a new reference to the ufunc identity. Note that this identity
 * is only a default identity value stored on the ufunc, since the individual
* ufunc loop (ArrayMethod) is queried for the actual identity.
*
* TODO: store a reference in the ufunc object itself, rather than
* constructing one each time
*/
/*
 * Return a new reference to the ufunc's default identity value and report
 * via *reorderable whether the reduction may be reordered.  This is only
 * the default stored on the ufunc; the individual loop (ArrayMethod) is
 * queried for the actual identity.  Returns NULL with a ValueError set for
 * an unrecognized identity kind.
 */
NPY_NO_EXPORT PyObject *
PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable)
{
    const int identity = ufunc->identity;

    if (identity == PyUFunc_One) {
        *reorderable = 1;
        return PyLong_FromLong(1);
    }
    if (identity == PyUFunc_Zero) {
        *reorderable = 1;
        return PyLong_FromLong(0);
    }
    if (identity == PyUFunc_MinusOne) {
        *reorderable = 1;
        return PyLong_FromLong(-1);
    }
    if (identity == PyUFunc_ReorderableNone) {
        *reorderable = 1;
        Py_RETURN_NONE;
    }
    if (identity == PyUFunc_None) {
        /* None identity without reorderability */
        *reorderable = 0;
        Py_RETURN_NONE;
    }
    if (identity == PyUFunc_IdentityValue) {
        *reorderable = 1;
        Py_INCREF(ufunc->identity_value);
        return ufunc->identity_value;
    }
    PyErr_Format(PyExc_ValueError,
            "ufunc %s has an invalid identity", ufunc_get_name_cstr(ufunc));
    return NULL;
}
/*
* Copy over parts of the ufunc structure that may need to be
* changed during execution. Returns 0 on success; -1 otherwise.
*/
/*
 * Seed the per-call working copies of the gufunc metadata that may be
 * modified during execution (core dim counts, sizes and flags), leaving
 * the originals on the ufunc untouched.  Always returns 0; the int return
 * type mirrors the other setup helpers.
 */
static int
_initialize_variable_parts(PyUFuncObject *ufunc,
                           int op_core_num_dims[],
                           npy_intp core_dim_sizes[],
                           npy_uint32 core_dim_flags[]) {
    int idx;

    for (idx = ufunc->nargs; idx-- > 0; ) {
        op_core_num_dims[idx] = ufunc->core_num_dims[idx];
    }
    for (idx = ufunc->core_num_dim_ix; idx-- > 0; ) {
        core_dim_sizes[idx] = ufunc->core_dim_sizes[idx];
        core_dim_flags[idx] = ufunc->core_dim_flags[idx];
    }
    return 0;
}
/*
 * Implements the outer loop for a generalized ufunc: validates and (for
 * axis/axes/keepdims) remaps the operands' core dimensions, creates the
 * NpyIter over the broadcast dimensions, and drives the inner strided loop
 * supplied by `ufuncimpl` over the core dimensions.
 *
 * `op` holds nin+nout operands (outputs may be NULL, to be allocated by
 * the iterator); `operation_descrs` are the already-resolved descriptors.
 *
 * Returns 0 on success and -1 on failure with an exception set.
 *
 * Fixes relative to the previous revision: the error paths for a failed
 * remap-axis allocation and a failed get_strided_loop jumped to `fail`
 * while `retval` was still 0, so those failures incorrectly reported
 * success; both now set retval = -1.
 */
static int
PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
        PyArrayMethodObject *ufuncimpl, PyArray_Descr *operation_descrs[],
        PyArrayObject *op[], NPY_CASTING casting, NPY_ORDER order,
        PyObject *axis, PyObject *axes, int keepdims)
{
    int nin, nout;
    int i, j, idim, nop;
    const char *ufunc_name;
    int retval;

    /* Use remapped axes for generalized ufunc */
    int broadcast_ndim, iter_ndim;
    int op_core_num_dims[NPY_MAXARGS];
    int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
    int *op_axes[NPY_MAXARGS];
    npy_uint32 core_dim_flags[NPY_MAXARGS];

    npy_uint32 op_flags[NPY_MAXARGS];
    npy_intp iter_shape[NPY_MAXARGS];
    NpyIter *iter = NULL;
    npy_uint32 iter_flags;
    npy_intp total_problem_size;

    /* These parameters come from a TLS global */
    int buffersize = 0, errormask = 0;

    /* The dimensions which get passed to the inner loop */
    npy_intp inner_dimensions[NPY_MAXDIMS+1];
    /* The strides which get passed to the inner loop */
    npy_intp *inner_strides = NULL;
    /* Auxiliary data allocated by the ufuncimpl (ArrayMethod) */
    NpyAuxData *auxdata = NULL;

    /* The sizes of the core dimensions (# entries is ufunc->core_num_dim_ix) */
    npy_intp *core_dim_sizes = inner_dimensions + 1;
    int core_dim_ixs_size;
    /* swapping around of axes */
    int *remap_axis_memory = NULL;
    int **remap_axis = NULL;

    nin = ufunc->nin;
    nout = ufunc->nout;
    nop = nin + nout;

    ufunc_name = ufunc_get_name_cstr(ufunc);

    NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);

    if (validate_casting(ufuncimpl,
            ufunc, op, operation_descrs, casting) < 0) {
        return -1;
    }

    /* Initialize possibly variable parts to the values from the ufunc */
    retval = _initialize_variable_parts(ufunc, op_core_num_dims,
                                        core_dim_sizes, core_dim_flags);
    if (retval < 0) {
        goto fail;
    }

    /*
     * If keepdims was passed in (and thus changed from the initial value
     * on top), check the gufunc is suitable, i.e., that its inputs share
     * the same number of core dimensions, and its outputs have none.
     */
    if (keepdims != -1) {
        retval = _check_keepdims_support(ufunc);
        if (retval < 0) {
            goto fail;
        }
    }
    if (axis != NULL) {
        retval = _check_axis_support(ufunc);
        if (retval < 0) {
            goto fail;
        }
    }
    /*
     * If keepdims is set and true, which means all input dimensions are
     * the same, signal that all output dimensions will be the same too.
     */
    if (keepdims == 1) {
        int num_dims = op_core_num_dims[0];
        for (i = nin; i < nop; ++i) {
            op_core_num_dims[i] = num_dims;
        }
    }
    else {
        /* keepdims was not set or was false; no adjustment necessary */
        keepdims = 0;
    }
    /*
     * Check that operands have the minimum dimensions required.
     * (Just checks core; broadcast dimensions are tested by the iterator.)
     */
    retval = _validate_num_dims(ufunc, op, core_dim_flags,
                                op_core_num_dims);
    if (retval < 0) {
        goto fail;
    }
    /*
     * Figure out the number of iteration dimensions, which
     * is the broadcast result of all the non-core dimensions.
     * (We do allow outputs to broadcast inputs currently, if they are given.
     * This is in line with what normal ufuncs do.)
     */
    broadcast_ndim = 0;
    for (i = 0; i < nop; ++i) {
        if (op[i] == NULL) {
            continue;
        }
        int n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
        if (n > broadcast_ndim) {
            broadcast_ndim = n;
        }
    }

    /* Possibly remap axes. */
    if (axes != NULL || axis != NULL) {
        assert(!(axes != NULL && axis != NULL));

        remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop);
        remap_axis_memory = PyArray_malloc(sizeof(remap_axis_memory[0]) *
                                           nop * NPY_MAXDIMS);
        if (remap_axis == NULL || remap_axis_memory == NULL) {
            PyErr_NoMemory();
            /* BUG FIX: retval was 0 here, so the failure reported success */
            retval = -1;
            goto fail;
        }
        for (i=0; i < nop; i++) {
            remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
        }
        if (axis) {
            retval = _parse_axis_arg(ufunc, op_core_num_dims, axis, op,
                                     broadcast_ndim, remap_axis);
        }
        else {
            retval = _parse_axes_arg(ufunc, op_core_num_dims, axes, op,
                                     broadcast_ndim, remap_axis);
        }
        if (retval < 0) {
            goto fail;
        }
    }

    /* Collect the lengths of the labelled core dimensions */
    retval = _get_coredim_sizes(ufunc, op, op_core_num_dims, core_dim_flags,
                                core_dim_sizes, remap_axis);
    if (retval < 0) {
        goto fail;
    }
    /*
     * Figure out the number of iterator creation dimensions,
     * which is the broadcast dimensions + all the core dimensions of
     * the outputs, so that the iterator can allocate those output
     * dimensions following the rules of order='F', for example.
     */
    iter_ndim = broadcast_ndim;
    for (i = nin; i < nop; ++i) {
        iter_ndim += op_core_num_dims[i];
    }
    if (iter_ndim > NPY_MAXDIMS) {
        PyErr_Format(PyExc_ValueError,
                     "too many dimensions for generalized ufunc %s",
                     ufunc_name);
        retval = -1;
        goto fail;
    }

    /* Fill in the initial part of 'iter_shape' */
    for (idim = 0; idim < broadcast_ndim; ++idim) {
        iter_shape[idim] = -1;
    }

    /* Fill in op_axes for all the operands */
    j = broadcast_ndim;
    for (i = 0; i < nop; ++i) {
        int n;

        if (op[i]) {
            n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
        }
        else {
            n = broadcast_ndim;
        }
        /* Broadcast all the unspecified dimensions normally */
        for (idim = 0; idim < broadcast_ndim; ++idim) {
            if (idim >= broadcast_ndim - n) {
                op_axes_arrays[i][idim] =
                    REMAP_AXIS(i, idim - (broadcast_ndim - n));
            }
            else {
                op_axes_arrays[i][idim] = -1;
            }
        }

        /*
         * Any output core dimensions shape should be ignored, so we add
         * it as a Reduce dimension (which can be broadcast with the rest).
         * These will be removed before the actual iteration for gufuncs.
         */
        for (idim = broadcast_ndim; idim < iter_ndim; ++idim) {
            op_axes_arrays[i][idim] = NPY_ITER_REDUCTION_AXIS(-1);
        }

        /* Except for when it belongs to this output */
        if (i >= nin) {
            int dim_offset = ufunc->core_offsets[i];
            int num_removed = 0;
            /*
             * Fill in 'iter_shape' and 'op_axes' for the core dimensions
             * of this output. Here, we have to be careful: if keepdims
             * was used, then the axes are not real core dimensions, but
             * are being added back for broadcasting, so their size is 1.
             * If the axis was removed, we should skip altogether.
             */
            if (keepdims) {
                for (idim = 0; idim < op_core_num_dims[i]; ++idim) {
                    iter_shape[j] = 1;
                    op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
                    ++j;
                }
            }
            else {
                for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
                    int core_index = dim_offset + idim;
                    int core_dim_index = ufunc->core_dim_ixs[core_index];
                    if ((core_dim_flags[core_dim_index] &
                         UFUNC_CORE_DIM_MISSING)) {
                        /* skip it */
                        num_removed++;
                        continue;
                    }
                    iter_shape[j] = core_dim_sizes[ufunc->core_dim_ixs[core_index]];
                    op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim - num_removed);
                    ++j;
                }
            }
        }
        op_axes[i] = op_axes_arrays[i];
    }

#if NPY_UF_DBG_TRACING
    printf("iter shapes:");
    for (j=0; j < iter_ndim; j++) {
        printf(" %ld", iter_shape[j]);
    }
    printf("\n");
#endif

    /* Get the buffersize and errormask */
    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        retval = -1;
        goto fail;
    }

    NPY_UF_DBG_PRINT("Finding inner loop\n");

    /*
     * We don't write to all elements, and the iterator may make
     * UPDATEIFCOPY temporary copies. The output arrays (unless they are
     * allocated by the iterator itself) must be considered READWRITE by the
     * iterator, so that the elements we don't write to are copied to the
     * possible temporary array.
     */
    _ufunc_setup_flags(ufunc, NPY_ITER_COPY | NPY_UFUNC_DEFAULT_INPUT_FLAGS,
                       NPY_ITER_UPDATEIFCOPY |
                       NPY_ITER_WRITEONLY |
                       NPY_UFUNC_DEFAULT_OUTPUT_FLAGS,
                       op_flags);
    /*
     * Set up the iterator per-op flags. For generalized ufuncs, we
     * can't do buffering, so must COPY or UPDATEIFCOPY.
     */
    iter_flags = ufunc->iter_flags |
                 NPY_ITER_MULTI_INDEX |
                 NPY_ITER_REFS_OK |
                 NPY_ITER_ZEROSIZE_OK |
                 NPY_ITER_COPY_IF_OVERLAP |
                 NPY_ITER_DELAY_BUFALLOC;

    /* Create the iterator */
    iter = NpyIter_AdvancedNew(nop, op, iter_flags,
                               order, NPY_UNSAFE_CASTING, op_flags,
                               operation_descrs, iter_ndim,
                               op_axes, iter_shape, 0);
    if (iter == NULL) {
        retval = -1;
        goto fail;
    }

    /* Fill in any allocated outputs */
    {
        PyArrayObject **operands = NpyIter_GetOperandArray(iter);
        for (i = nin; i < nop; ++i) {
            if (op[i] == NULL) {
                op[i] = operands[i];
                Py_INCREF(op[i]);
            }
        }
    }
    /*
     * Set up the inner strides array. Because we're not doing
     * buffering, the strides are fixed throughout the looping.
     */
    core_dim_ixs_size = 0;
    for (i = 0; i < nop; ++i) {
        core_dim_ixs_size += ufunc->core_num_dims[i];
    }
    inner_strides = (npy_intp *)PyArray_malloc(
                        NPY_SIZEOF_INTP * (nop+core_dim_ixs_size));
    if (inner_strides == NULL) {
        PyErr_NoMemory();
        retval = -1;
        goto fail;
    }
    /* Copy the strides after the first nop */
    idim = nop;
    for (i = 0; i < nop; ++i) {
        /*
         * Need to use the arrays in the iterator, not op, because
         * a copy with a different-sized type may have been made.
         */
        PyArrayObject *arr = NpyIter_GetOperandArray(iter)[i];
        npy_intp *shape = PyArray_SHAPE(arr);
        npy_intp *strides = PyArray_STRIDES(arr);
        /*
         * Could be negative if flexible dims are used, but not for
         * keepdims, since those dimensions are allocated in arr.
         */
        int core_start_dim = PyArray_NDIM(arr) - op_core_num_dims[i];
        int num_removed = 0;
        int dim_offset = ufunc->core_offsets[i];

        for (j = 0; j < ufunc->core_num_dims[i]; ++j) {
            int core_dim_index = ufunc->core_dim_ixs[dim_offset + j];
            /*
             * Force zero stride when the shape is 1 (always the case for
             * missing dimensions), so that broadcasting works right.
             */
            if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
                num_removed++;
                inner_strides[idim++] = 0;
            }
            else {
                int remapped_axis = REMAP_AXIS(i, core_start_dim + j - num_removed);
                if (shape[remapped_axis] != 1) {
                    inner_strides[idim++] = strides[remapped_axis];
                } else {
                    inner_strides[idim++] = 0;
                }
            }
        }
    }

    total_problem_size = NpyIter_GetIterSize(iter);
    if (total_problem_size < 0) {
        /*
         * Only used for threading, if negative (this means that it is
         * larger than ssize_t before axes removal) assume that the actual
         * problem is large enough to be threaded usefully.
         */
        total_problem_size = 1000;
    }

    /* Remove all the core output dimensions from the iterator */
    for (i = broadcast_ndim; i < iter_ndim; ++i) {
        if (NpyIter_RemoveAxis(iter, broadcast_ndim) != NPY_SUCCEED) {
            retval = -1;
            goto fail;
        }
    }
    if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) {
        retval = -1;
        goto fail;
    }
    if (NpyIter_EnableExternalLoop(iter) != NPY_SUCCEED) {
        retval = -1;
        goto fail;
    }

    /*
     * The first nop strides are for the inner loop (but only can
     * copy them after removing the core axes). The strides will not change
     * if the iterator is not buffered (they are effectively fixed).
     * Supporting buffering would make sense, but probably would have to be
     * done in the inner-loop itself (not the iterator).
     */
    assert(!NpyIter_IsBuffered(iter));
    memcpy(inner_strides, NpyIter_GetInnerStrideArray(iter),
           NPY_SIZEOF_INTP * nop);

    /* Final preparation of the arraymethod call */
    PyArrayMethod_Context context;
    NPY_context_init(&context, operation_descrs);
    context.caller = (PyObject *)ufunc;
    context.method = ufuncimpl;

    PyArrayMethod_StridedLoop *strided_loop;
    NPY_ARRAYMETHOD_FLAGS flags = 0;

    if (ufuncimpl->get_strided_loop(&context, 1, 0, inner_strides,
            &strided_loop, &auxdata, &flags) < 0) {
        /* BUG FIX: retval was 0 here, so the failure reported success */
        retval = -1;
        goto fail;
    }
    flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter));
    int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;

    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Start with the floating-point exception flags cleared */
        npy_clear_floatstatus_barrier((char*)&iter);
    }

    NPY_UF_DBG_PRINT("Executing inner loop\n");

    if (NpyIter_GetIterSize(iter) != 0) {
        /* Do the ufunc loop */
        NpyIter_IterNextFunc *iternext;
        char **dataptr;
        npy_intp *count_ptr;
        NPY_BEGIN_THREADS_DEF;

        /* Get the variables needed for the loop */
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            retval = -1;
            goto fail;
        }
        dataptr = NpyIter_GetDataPtrArray(iter);
        count_ptr = NpyIter_GetInnerLoopSizePtr(iter);

        if (!needs_api) {
            NPY_BEGIN_THREADS_THRESHOLDED(total_problem_size);
        }
        do {
            inner_dimensions[0] = *count_ptr;
            retval = strided_loop(&context,
                    dataptr, inner_dimensions, inner_strides, auxdata);
        } while (retval == 0 && iternext(iter));

        if (!needs_api) {
            NPY_END_THREADS;
        }
    }

    if (retval == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        retval = _check_ufunc_fperr(errormask, ufunc_name);
    }

    PyArray_free(inner_strides);
    NPY_AUXDATA_FREE(auxdata);
    if (!NpyIter_Deallocate(iter)) {
        retval = -1;
    }

    PyArray_free(remap_axis_memory);
    PyArray_free(remap_axis);

    NPY_UF_DBG_PRINT1("Returning code %d\n", retval);

    return retval;

fail:
    NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval);
    PyArray_free(inner_strides);
    NPY_AUXDATA_FREE(auxdata);
    NpyIter_Deallocate(iter);
    PyArray_free(remap_axis_memory);
    PyArray_free(remap_axis);
    return retval;
}
/*
 * Implements the outer loop for a regular (non-generalized) ufunc call:
 * sets up the per-operand iterator flags (masked when `wheremask` is
 * given), prepares the arraymethod context, and dispatches either to the
 * trivial single-output fast path or to the full iterator-based loop.
 *
 * Returns 0 on success, -1 on failure (exception set).
 */
static int
PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc,
        PyArrayMethodObject *ufuncimpl, PyArray_Descr *operation_descrs[],
        PyArrayObject *op[], NPY_CASTING casting, NPY_ORDER order,
        PyArrayObject *wheremask)
{
    int nin = ufunc->nin, nout = ufunc->nout, nop = nin + nout;
    npy_intp default_op_out_flags;
    npy_uint32 op_flags[NPY_MAXARGS];

    /* These parameters come from a TLS global */
    int buffersize = 0, errormask = 0;

    NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_get_name_cstr(ufunc));

    /* Get the buffersize and errormask */
    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        return -1;
    }

    /*
     * Output flags: masked writes when where= is supplied, plain writes
     * otherwise; input flags are identical in both cases.
     */
    default_op_out_flags = (wheremask != NULL)
            ? (NPY_ITER_NO_SUBTYPE |
               NPY_ITER_WRITEMASKED |
               NPY_UFUNC_DEFAULT_OUTPUT_FLAGS)
            : (NPY_ITER_WRITEONLY |
               NPY_UFUNC_DEFAULT_OUTPUT_FLAGS);
    _ufunc_setup_flags(ufunc, NPY_UFUNC_DEFAULT_INPUT_FLAGS,
                       default_op_out_flags, op_flags);

    /* Final preparation of the arraymethod call */
    PyArrayMethod_Context context;
    NPY_context_init(&context, operation_descrs);
    context.caller = (PyObject *)ufunc;
    context.method = ufuncimpl;

    /* Do the ufunc loop */
    if (wheremask != NULL) {
        NPY_UF_DBG_PRINT("Executing masked inner loop\n");

        if (nop + 1 > NPY_MAXARGS) {
            PyErr_SetString(PyExc_ValueError,
                    "Too many operands when including where= parameter");
            return -1;
        }
        /* Append the mask as an extra, descriptor-less operand. */
        op[nop] = wheremask;
        operation_descrs[nop] = NULL;

        return execute_ufunc_loop(&context, 1,
                op, order, buffersize, casting,
                op_flags, errormask);
    }

    NPY_UF_DBG_PRINT("Executing normal inner loop\n");

    /*
     * This checks whether a trivial loop is ok, making copies of
     * scalar and one dimensional operands if that should help.
     */
    int trivial_ok = check_for_trivial_loop(ufuncimpl,
            op, operation_descrs, casting, buffersize);
    if (trivial_ok < 0) {
        return -1;
    }
    if (trivial_ok && context.method->nout == 1) {
        /* Try to handle everything without using the (heavy) iterator */
        int retval = try_trivial_single_output_loop(&context,
                op, order, errormask);
        /* -2 means "fall through to the full iterator path" */
        if (retval != -2) {
            return retval;
        }
    }
    return execute_ufunc_loop(&context, 0,
            op, order, buffersize, casting, op_flags, errormask);
}
/*
* Promote and resolve a reduction like operation.
*
* @param ufunc
* @param arr The operation array
* @param out The output array or NULL if not provided. Note that NumPy always
* used out to mean the same as `dtype=out.dtype` and never passed
* the array itself to the type-resolution.
* @param signature The DType signature, which may already be set due to the
* dtype passed in by the user, or the special cases (add, multiply).
* (Contains strong references and may be modified.)
* @param enforce_uniform_args If `NPY_TRUE` fully uniform dtypes/descriptors
* are enforced as required for accumulate and (currently) reduceat.
* @param out_descrs New references to the resolved descriptors (on success).
* @param method The ufunc method, "reduce", "reduceat", or "accumulate".
* @returns ufuncimpl The `ArrayMethod` implementation to use. Or NULL if an
* error occurred.
*/
static PyArrayMethodObject *
reducelike_promote_and_resolve(PyUFuncObject *ufunc,
        PyArrayObject *arr, PyArrayObject *out,
        PyArray_DTypeMeta *signature[3],
        npy_bool enforce_uniform_args, PyArray_Descr *out_descrs[3],
        NPY_CASTING casting, char *method)
{
    /*
     * If no dtype is specified and out is not specified, we override the
     * integer and bool dtype used for add and multiply.
     *
     * TODO: The following should be handled by a promoter!
     */
    if (signature[0] == NULL && out == NULL) {
        /*
         * For integer types --- make sure at least a long
         * is used for add and multiply reduction to avoid overflow
         */
        int typenum = PyArray_TYPE(arr);
        if ((PyTypeNum_ISBOOL(typenum) || PyTypeNum_ISINTEGER(typenum))
                && ((strcmp(ufunc->name, "add") == 0)
                    || (strcmp(ufunc->name, "multiply") == 0))) {
            if (PyTypeNum_ISBOOL(typenum)) {
                typenum = NPY_INTP;
            }
            else if ((size_t)PyArray_ITEMSIZE(arr) < sizeof(npy_intp)) {
                /* widen to pointer-sized int, keeping signedness */
                if (PyTypeNum_ISUNSIGNED(typenum)) {
                    typenum = NPY_UINTP;
                }
                else {
                    typenum = NPY_INTP;
                }
            }
            /* new reference; signature[0] was NULL so nothing leaks */
            signature[0] = PyArray_DTypeFromTypeNum(typenum);
        }
    }

    assert(signature[2] == NULL);  /* we always fill it here */
    Py_XINCREF(signature[0]);
    signature[2] = signature[0];

    /*
     * Note that the `ops` is not really correct. But legacy resolution
     * cannot quite handle the correct ops (e.g. a NULL first item if `out`
     * is NULL) so we pass `arr` instead in that case.
     */
    PyArrayObject *ops[3] = {out ? out : arr, arr, out};

    /*
     * TODO: If `out` is not provided, arguably `initial` could define
     * the first DType (and maybe also the out one), that way
     * `np.add.reduce([1, 2, 3], initial=3.4)` would return a float
     * value. As of 1.20, it returned an integer, so that should
     * probably go to an error/warning first.
     */
    PyArray_DTypeMeta *operation_DTypes[3] = {
            NULL, NPY_DTYPE(PyArray_DESCR(arr)), NULL};
    Py_INCREF(operation_DTypes[1]);

    if (out != NULL) {
        operation_DTypes[0] = NPY_DTYPE(PyArray_DESCR(out));
        Py_INCREF(operation_DTypes[0]);
        operation_DTypes[2] = operation_DTypes[0];
        Py_INCREF(operation_DTypes[2]);
    }

    PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
            ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE);
    if (ufuncimpl == NULL) {
        /* DTypes may currently get filled in fallbacks and XDECREF for error: */
        Py_XDECREF(operation_DTypes[0]);
        Py_XDECREF(operation_DTypes[1]);
        Py_XDECREF(operation_DTypes[2]);
        return NULL;
    }

    /*
     * Find the correct descriptors for the operation. We use unsafe casting
     * for historic reasons: The logic ufuncs required it to cast everything to
     * boolean. However, we now special case the logical ufuncs, so that the
     * casting safety could in principle be set to the default same-kind.
     * (although this should possibly happen through a deprecation)
     */
    int res = resolve_descriptors(3, ufunc, ufuncimpl,
            ops, out_descrs, signature, operation_DTypes, NULL, casting);

    Py_XDECREF(operation_DTypes[0]);
    Py_XDECREF(operation_DTypes[1]);
    Py_XDECREF(operation_DTypes[2]);
    if (res < 0) {
        return NULL;
    }

    /*
     * The first operand and output should be the same array, so they should
     * be identical.  The second argument can be different for reductions,
     * but is checked to be identical for accumulate and reduceat.
     * Ideally, the type-resolver ensures that all are identical, but we do
     * not enforce this here strictly.  Otherwise correct handling of
     * byte-order changes (or metadata) requires a lot of care; see gh-20699.
     */
    if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || (
            enforce_uniform_args && !PyArray_EquivTypes(
                    out_descrs[0], out_descrs[1]))) {
        PyErr_Format(PyExc_TypeError,
                "the resolved dtypes are not compatible with %s.%s. "
                "Resolved (%R, %R, %R)",
                ufunc_get_name_cstr(ufunc), method,
                out_descrs[0], out_descrs[1], out_descrs[2]);
        goto fail;
    }

    /*
     * After checking that they are equivalent, we enforce the use of the out
     * one (which the user should have defined). (Needed by string dtype)
     */
    Py_INCREF(out_descrs[2]);
    Py_SETREF(out_descrs[0], out_descrs[2]);

    /* TODO: This really should _not_ be unsafe casting (same above)! */
    if (validate_casting(ufuncimpl, ufunc, ops, out_descrs, casting) < 0) {
        goto fail;
    }

    return ufuncimpl;

fail:
    /* drop the references handed out via out_descrs before reporting */
    for (int i = 0; i < 3; ++i) {
        Py_CLEAR(out_descrs[i]);
    }
    return NULL;
}
/*
 * Inner-loop driver for reduce: iterates `iter` (2 operands, or 3 when a
 * `where` mask is present) and invokes `strided_loop` with the two items
 * expanded to the (out, in, out) triple the ufunc loop expects.  The
 * first `skip_first_count` first-visit elements of the output are skipped
 * (they were used to initialize the result).  Returns 0 on success or the
 * negative error code from the strided loop.
 */
static int
reduce_loop(PyArrayMethod_Context *context,
        PyArrayMethod_StridedLoop *strided_loop, NpyAuxData *auxdata,
        NpyIter *iter, char **dataptrs, npy_intp const *strides,
        npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
        int needs_api, npy_intp skip_first_count)
{
    int retval = 0;
    char *dataptrs_copy[4];
    npy_intp strides_copy[4];
    npy_bool masked;

    NPY_BEGIN_THREADS_DEF;
    /* Get the number of operands, to determine whether "where" is used */
    masked = (NpyIter_GetNOp(iter) == 3);

    if (!needs_api) {
        NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
    }

    if (skip_first_count > 0) {
        assert(!masked);  /* Path currently not available for masked */
        while (1) {
            npy_intp count = *countptr;

            /* Skip any first-visit elements */
            if (NpyIter_IsFirstVisit(iter, 0)) {
                if (strides[0] == 0) {
                    /* contiguous reduce: skip just the first element */
                    --count;
                    --skip_first_count;
                    dataptrs[1] += strides[1];
                }
                else {
                    /* whole inner run is first-visit: skip it entirely */
                    skip_first_count -= count;
                    count = 0;
                }
            }
            if (count > 0) {
                /* Turn the two items into three for the inner loop */
                dataptrs_copy[0] = dataptrs[0];
                dataptrs_copy[1] = dataptrs[1];
                dataptrs_copy[2] = dataptrs[0];
                strides_copy[0] = strides[0];
                strides_copy[1] = strides[1];
                strides_copy[2] = strides[0];

                retval = strided_loop(context,
                        dataptrs_copy, &count, strides_copy, auxdata);
                if (retval < 0) {
                    goto finish_loop;
                }
            }

            /* Advance loop, and abort on error (or finish) */
            if (!iternext(iter)) {
                goto finish_loop;
            }

            /* When skipping is done break and continue with faster loop */
            if (skip_first_count == 0) {
                break;
            }
        }
    }

    do {
        /* Turn the two items into three for the inner loop */
        dataptrs_copy[0] = dataptrs[0];
        dataptrs_copy[1] = dataptrs[1];
        dataptrs_copy[2] = dataptrs[0];
        strides_copy[0] = strides[0];
        strides_copy[1] = strides[1];
        strides_copy[2] = strides[0];
        if (masked) {
            /* pass the where= mask through as the fourth operand */
            dataptrs_copy[3] = dataptrs[2];
            strides_copy[3] = strides[2];
        }

        retval = strided_loop(context,
                dataptrs_copy, countptr, strides_copy, auxdata);
        if (retval < 0) {
            goto finish_loop;
        }

    } while (iternext(iter));

finish_loop:
    NPY_END_THREADS;

    return retval;
}
/*
* The implementation of the reduction operators with the new iterator
* turned into a bit of a long function here, but I think the design
* of this part needs to be changed to be more like einsum, so it may
* not be worth refactoring it too much. Consider this timing:
*
* >>> a = arange(10000)
*
* >>> timeit sum(a)
* 10000 loops, best of 3: 17 us per loop
*
* >>> timeit einsum("i->",a)
* 100000 loops, best of 3: 13.5 us per loop
*
* The axes must already be bounds-checked by the calling function,
* this function does not validate them.
*/
static PyArrayObject *
PyUFunc_Reduce(PyUFuncObject *ufunc,
PyArrayObject *arr, PyArrayObject *out,
int naxes, int *axes, PyArray_DTypeMeta *signature[3], int keepdims,
PyObject *initial, PyArrayObject *wheremask)
{
int iaxes, ndim;
npy_bool axis_flags[NPY_MAXDIMS];
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name);
ndim = PyArray_NDIM(arr);
/* Create an array of flags for reduction */
memset(axis_flags, 0, ndim);
for (iaxes = 0; iaxes < naxes; ++iaxes) {
int axis = axes[iaxes];
if (axis_flags[axis]) {
PyErr_SetString(PyExc_ValueError,
"duplicate value in 'axis'");
return NULL;
}
axis_flags[axis] = 1;
}
if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
return NULL;
}
PyArray_Descr *descrs[3];
PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
arr, out, signature, NPY_FALSE, descrs, NPY_UNSAFE_CASTING, "reduce");
if (ufuncimpl == NULL) {
return NULL;
}
PyArrayMethod_Context context;
NPY_context_init(&context, descrs);
context.caller = (PyObject *)ufunc;
context.method = ufuncimpl;
PyArrayObject *result = PyUFunc_ReduceWrapper(&context,
arr, out, wheremask, axis_flags, keepdims,
initial, reduce_loop, buffersize, ufunc_name, errormask);
for (int i = 0; i < 3; i++) {
Py_DECREF(descrs[i]);
}
return result;
}
/*
 * Implementation of `ufunc.accumulate(array, axis, dtype, out)`.
 *
 * Like reduce along `axis`, but every intermediate result is stored, so
 * the output has the same shape as the input.  Returns a new reference
 * to the output array (freshly allocated when `out` is NULL), or NULL
 * with an exception set.
 *
 * Fix: the reference taken on `out` was previously leaked when
 * `reducelike_promote_and_resolve` failed; it is now released on that
 * error path.  (`goto fail` cannot be used there since `descrs` is
 * still uninitialized.)
 */
static PyObject *
PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
        int axis, PyArray_DTypeMeta *signature[3])
{
    PyArrayObject *op[2];
    int op_axes_arrays[2][NPY_MAXDIMS];
    int *op_axes[2] = {op_axes_arrays[0], op_axes_arrays[1]};
    npy_uint32 op_flags[2];
    int idim, ndim;
    int need_outer_iterator;
    int res = 0;

    NPY_cast_info copy_info;
    NPY_cast_info_init(&copy_info);

#if NPY_UF_DBG_TRACING
    const char *ufunc_name = ufunc_get_name_cstr(ufunc);
#endif

    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;

    NpyIter *iter = NULL;

    /* These parameters come from a TLS global */
    int buffersize = 0, errormask = 0;

    NPY_BEGIN_THREADS_DEF;

    NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.accumulate\n", ufunc_name);

#if 0
    printf("Doing %s.accumulate on array with dtype : ", ufunc_name);
    PyObject_Print((PyObject *)PyArray_DESCR(arr), stdout, 0);
    printf("\n");
#endif

    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        return NULL;
    }

    /* Take a reference to out for later returning */
    Py_XINCREF(out);

    PyArray_Descr *descrs[3];
    PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
            arr, out, signature, NPY_TRUE, descrs, NPY_UNSAFE_CASTING,
            "accumulate");
    if (ufuncimpl == NULL) {
        /* Release the reference taken above (this was previously leaked). */
        Py_XDECREF(out);
        return NULL;
    }

    /*
     * The below code assumes that all descriptors are interchangeable, we
     * allow them to not be strictly identical (but they typically should be)
     */
    assert(PyArray_EquivTypes(descrs[0], descrs[1])
           && PyArray_EquivTypes(descrs[0], descrs[2]));

    PyArrayMethod_Context context;
    NPY_context_init(&context, descrs);
    /* (these were accidental comma expressions; plain statements now) */
    context.caller = (PyObject *)ufunc;
    context.method = ufuncimpl;

    ndim = PyArray_NDIM(arr);

#if NPY_UF_DBG_TRACING
    printf("Found %s.accumulate inner loop with dtype : ", ufunc_name);
    PyObject_Print((PyObject *)descrs[0], stdout, 0);
    printf("\n");
#endif

    /* Set up the op_axes for the outer loop */
    for (idim = 0; idim < ndim; ++idim) {
        op_axes_arrays[0][idim] = idim;
        op_axes_arrays[1][idim] = idim;
    }

    /* The per-operand flags for the outer loop */
    op_flags[0] = NPY_ITER_READWRITE |
                  NPY_ITER_NO_BROADCAST |
                  NPY_ITER_ALLOCATE |
                  NPY_ITER_NO_SUBTYPE;
    op_flags[1] = NPY_ITER_READONLY;

    op[0] = out;
    op[1] = arr;

    need_outer_iterator = (ndim > 1);
    /* We can't buffer, so must do UPDATEIFCOPY */
    if (!PyArray_ISALIGNED(arr) || (out && !PyArray_ISALIGNED(out)) ||
            !PyArray_EquivTypes(descrs[1], PyArray_DESCR(arr)) ||
            (out &&
             !PyArray_EquivTypes(descrs[0], PyArray_DESCR(out)))) {
        need_outer_iterator = 1;
    }
    /* If input and output overlap in memory, use iterator to figure it out */
    else if (out != NULL && solve_may_share_memory(out, arr, NPY_MAY_SHARE_BOUNDS) != 0) {
        need_outer_iterator = 1;
    }

    if (need_outer_iterator) {
        int ndim_iter = 0;
        npy_uint32 flags = NPY_ITER_ZEROSIZE_OK|
                           NPY_ITER_REFS_OK|
                           NPY_ITER_COPY_IF_OVERLAP;

        /*
         * The way accumulate is set up, we can't do buffering,
         * so make a copy instead when necessary.
         */
        ndim_iter = ndim;
        flags |= NPY_ITER_MULTI_INDEX;
        /*
         * Add some more flags.
         *
         * The accumulation outer loop is 'elementwise' over the array, so turn
         * on NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE. That is, in-place
         * accumulate(x, out=x) is safe to do without temporary copies.
         */
        op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
        op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;

        NPY_UF_DBG_PRINT("Allocating outer iterator\n");
        iter = NpyIter_AdvancedNew(2, op, flags,
                                   NPY_KEEPORDER, NPY_UNSAFE_CASTING,
                                   op_flags, descrs,
                                   ndim_iter, op_axes, NULL, 0);
        if (iter == NULL) {
            goto fail;
        }

        /* In case COPY or UPDATEIFCOPY occurred */
        op[0] = NpyIter_GetOperandArray(iter)[0];
        op[1] = NpyIter_GetOperandArray(iter)[1];

        if (NpyIter_RemoveAxis(iter, axis) != NPY_SUCCEED) {
            goto fail;
        }
        if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) {
            goto fail;
        }
    }

    /* Get the output from the iterator if it was allocated */
    if (out == NULL) {
        if (iter) {
            op[0] = out = NpyIter_GetOperandArray(iter)[0];
            Py_INCREF(out);
        }
        else {
            PyArray_Descr *dtype = descrs[0];
            Py_INCREF(dtype);
            op[0] = out = (PyArrayObject *)PyArray_NewFromDescr_int(
                    &PyArray_Type, dtype,
                    ndim, PyArray_DIMS(op[1]), NULL, NULL,
                    0, NULL, NULL, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY);
            if (out == NULL) {
                goto fail;
            }
        }
    }

    npy_intp fixed_strides[3];
    if (need_outer_iterator) {
        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
    }
    else {
        fixed_strides[0] = PyArray_STRIDES(op[0])[axis];
        fixed_strides[1] = PyArray_STRIDES(op[1])[axis];
    }
    // First argument is also passed as output (e.g. see dataptr below).
    fixed_strides[2] = fixed_strides[0];

    NPY_ARRAYMETHOD_FLAGS flags = 0;
    if (ufuncimpl->get_strided_loop(&context,
            1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
        goto fail;
    }

    /* Set up function to copy the first element if it has references */
    if (PyDataType_REFCHK(descrs[2])) {
        NPY_ARRAYMETHOD_FLAGS copy_flags;
        /* Setup guarantees aligned here. */
        if (PyArray_GetDTypeTransferFunction(
                1, 0, 0, descrs[1], descrs[2], 0, &copy_info,
                &copy_flags) == NPY_FAIL) {
            goto fail;
        }
        flags = PyArrayMethod_COMBINED_FLAGS(flags, copy_flags);
    }

    if (iter != NULL) {
        flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter));
    }

    int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Start with the floating-point exception flags cleared */
        npy_clear_floatstatus_barrier((char*)&iter);
    }

    /*
     * If the reduction axis has size zero, either return the reduction
     * unit for UFUNC_REDUCE, or return the zero-sized output array
     * for UFUNC_ACCUMULATE.
     */
    if (PyArray_DIM(op[1], axis) == 0) {
        goto finish;
    }
    else if (PyArray_SIZE(op[0]) == 0) {
        goto finish;
    }

    if (iter && NpyIter_GetIterSize(iter) != 0) {
        char *dataptr_copy[3];
        npy_intp stride_copy[3];
        npy_intp count_m1, stride0, stride1;

        NpyIter_IterNextFunc *iternext;
        char **dataptr;

        int itemsize = descrs[0]->elsize;

        /* Get the variables needed for the loop */
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            goto fail;
        }
        dataptr = NpyIter_GetDataPtrArray(iter);

        /* Execute the loop with just the outer iterator */
        count_m1 = PyArray_DIM(op[1], axis)-1;
        stride1 = PyArray_STRIDE(op[1], axis);

        NPY_UF_DBG_PRINT("UFunc: Reduce loop with just outer iterator\n");

        stride0 = PyArray_STRIDE(op[0], axis);

        stride_copy[0] = stride0;
        stride_copy[1] = stride1;
        stride_copy[2] = stride0;

        if (!needs_api) {
            NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
        }

        do {
            dataptr_copy[0] = dataptr[0];
            dataptr_copy[1] = dataptr[1];
            dataptr_copy[2] = dataptr[0];

            /*
             * Copy the first element to start the reduction.
             *
             * Output (dataptr[0]) and input (dataptr[1]) may point to
             * the same memory, e.g. np.add.accumulate(a, out=a).
             */
            if (copy_info.func) {
                const npy_intp one = 1;
                if (copy_info.func(
                        &copy_info.context, &dataptr_copy[1], &one,
                        &stride_copy[1], copy_info.auxdata) < 0) {
                    NPY_END_THREADS;
                    goto fail;
                }
            }
            else {
                memmove(dataptr_copy[2], dataptr_copy[1], itemsize);
            }

            if (count_m1 > 0) {
                /* Turn the two items into three for the inner loop */
                dataptr_copy[1] += stride1;
                dataptr_copy[2] += stride0;

                NPY_UF_DBG_PRINT1("iterator loop count %d\n",
                        (int)count_m1);

                res = strided_loop(&context,
                        dataptr_copy, &count_m1, stride_copy, auxdata);
            }
        } while (res == 0 && iternext(iter));

        NPY_END_THREADS;
    }
    else if (iter == NULL) {
        char *dataptr_copy[3];

        int itemsize = descrs[0]->elsize;

        /* Execute the loop with no iterators */
        npy_intp count = PyArray_DIM(op[1], axis);
        npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis);

        NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n");

        if (PyArray_NDIM(op[0]) != PyArray_NDIM(op[1]) ||
                !PyArray_CompareLists(PyArray_DIMS(op[0]),
                                      PyArray_DIMS(op[1]),
                                      PyArray_NDIM(op[0]))) {
            PyErr_SetString(PyExc_ValueError,
                    "provided out is the wrong size "
                    "for the accumulation.");
            goto fail;
        }
        stride0 = PyArray_STRIDE(op[0], axis);

        /* Turn the two items into three for the inner loop */
        dataptr_copy[0] = PyArray_BYTES(op[0]);
        dataptr_copy[1] = PyArray_BYTES(op[1]);
        dataptr_copy[2] = PyArray_BYTES(op[0]);

        /*
         * Copy the first element to start the reduction.
         *
         * Output (dataptr[0]) and input (dataptr[1]) may point to the
         * same memory, e.g. np.add.accumulate(a, out=a).
         */
        if (copy_info.func) {
            const npy_intp one = 1;
            const npy_intp strides[2] = {itemsize, itemsize};
            if (copy_info.func(
                    &copy_info.context, &dataptr_copy[1], &one,
                    strides, copy_info.auxdata) < 0) {
                goto fail;
            }
        }
        else {
            memmove(dataptr_copy[2], dataptr_copy[1], itemsize);
        }

        if (count > 1) {
            --count;
            dataptr_copy[1] += stride1;
            dataptr_copy[2] += stride0;

            NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count);

            if (!needs_api) {
                NPY_BEGIN_THREADS_THRESHOLDED(count);
            }

            res = strided_loop(&context,
                    dataptr_copy, &count, fixed_strides, auxdata);

            NPY_END_THREADS;
        }
    }

finish:
    NPY_AUXDATA_FREE(auxdata);
    NPY_cast_info_xfree(&copy_info);
    Py_DECREF(descrs[0]);
    Py_DECREF(descrs[1]);
    Py_DECREF(descrs[2]);

    if (!NpyIter_Deallocate(iter)) {
        res = -1;
    }

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        res = _check_ufunc_fperr(errormask, "accumulate");
    }

    if (res < 0) {
        Py_DECREF(out);
        return NULL;
    }

    return (PyObject *)out;

fail:
    Py_XDECREF(out);

    NPY_AUXDATA_FREE(auxdata);
    NPY_cast_info_xfree(&copy_info);

    Py_XDECREF(descrs[0]);
    Py_XDECREF(descrs[1]);
    Py_XDECREF(descrs[2]);

    NpyIter_Deallocate(iter);

    return NULL;
}
/*
* Reduceat performs a reduce over an axis using the indices as a guide
*
* op.reduceat(array,indices) computes
* op.reduce(array[indices[i]:indices[i+1]])
* for i=0..end with an implicit indices[i+1]=len(array)
* assumed when i=end-1
*
* if indices[i+1] <= indices[i]+1
* then the result is array[indices[i]] for that value
*
* op.accumulate(array) is the same as
* op.reduceat(array,indices)[::2]
* where indices is range(len(array)-1) with a zero placed in every other sample
* indices = zeros(len(array)*2-1)
* indices[1::2] = range(1,len(array))
*
* output shape is based on the size of indices
*
* TODO: Reduceat duplicates too much code from accumulate!
*/
/*
 * Implementation of `ufunc.reduceat(array, indices, axis, dtype, out)`;
 * see the block comment above for the exact semantics.
 *
 * Returns a new reference to the output array (freshly allocated when
 * `out` is NULL), or NULL with an exception set.  `ind` must already be
 * a validated 1-d intp array (bounds are checked here).
 *
 * Fix: the reference taken on `out` was previously leaked when
 * `reducelike_promote_and_resolve` failed; it is now released on that
 * error path.  (`goto fail` cannot be used there since `descrs` is
 * still uninitialized.)
 */
static PyObject *
PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
        PyArrayObject *out, int axis, PyArray_DTypeMeta *signature[3])
{
    PyArrayObject *op[3];
    int op_axes_arrays[3][NPY_MAXDIMS];
    int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1],
                       op_axes_arrays[2]};
    npy_uint32 op_flags[3];
    int idim, ndim;
    int need_outer_iterator = 0;

    int res = 0;

    NpyIter *iter = NULL;

    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;

    /* The reduceat indices - ind must be validated outside this call */
    npy_intp *reduceat_ind;
    npy_intp i, ind_size, red_axis_size;

    const char *ufunc_name = ufunc_get_name_cstr(ufunc);
    char *opname = "reduceat";

    /* These parameters come from a TLS global */
    int buffersize = 0, errormask = 0;

    NPY_BEGIN_THREADS_DEF;

    reduceat_ind = (npy_intp *)PyArray_DATA(ind);
    ind_size = PyArray_DIM(ind, 0);
    red_axis_size = PyArray_DIM(arr, axis);

    /* Check for out-of-bounds values in indices array */
    for (i = 0; i < ind_size; ++i) {
        if (reduceat_ind[i] < 0 || reduceat_ind[i] >= red_axis_size) {
            PyErr_Format(PyExc_IndexError,
                "index %" NPY_INTP_FMT " out-of-bounds in %s.%s [0, %" NPY_INTP_FMT ")",
                reduceat_ind[i], ufunc_name, opname, red_axis_size);
            return NULL;
        }
    }

    NPY_UF_DBG_PRINT2("\nEvaluating ufunc %s.%s\n", ufunc_name, opname);

#if 0
    printf("Doing %s.%s on array with dtype : ", ufunc_name, opname);
    PyObject_Print((PyObject *)PyArray_DESCR(arr), stdout, 0);
    printf("\n");
    printf("Index size is %d\n", (int)ind_size);
#endif

    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        return NULL;
    }

    /* Take a reference to out for later returning */
    Py_XINCREF(out);

    PyArray_Descr *descrs[3];
    PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
            arr, out, signature, NPY_TRUE, descrs, NPY_UNSAFE_CASTING,
            "reduceat");
    if (ufuncimpl == NULL) {
        /* Release the reference taken above (this was previously leaked). */
        Py_XDECREF(out);
        return NULL;
    }

    /*
     * The below code assumes that all descriptors are interchangeable, we
     * allow them to not be strictly identical (but they typically should be)
     */
    assert(PyArray_EquivTypes(descrs[0], descrs[1])
           && PyArray_EquivTypes(descrs[0], descrs[2]));

    if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
        /* This can be removed, but the initial element copy needs fixing */
        PyErr_SetString(PyExc_TypeError,
                "reduceat currently only supports `object` dtype with "
                "references");
        goto fail;
    }

    PyArrayMethod_Context context;
    NPY_context_init(&context, descrs);
    /* (these were accidental comma expressions; plain statements now) */
    context.caller = (PyObject *)ufunc;
    context.method = ufuncimpl;

    ndim = PyArray_NDIM(arr);

#if NPY_UF_DBG_TRACING
    printf("Found %s.%s inner loop with dtype : ", ufunc_name, opname);
    PyObject_Print((PyObject *)descrs[0], stdout, 0);
    printf("\n");
#endif

    /* Set up the op_axes for the outer loop */
    for (idim = 0; idim < ndim; ++idim) {
        /* Use the i-th iteration dimension to match up ind */
        if (idim == axis) {
            op_axes_arrays[0][idim] = axis;
            op_axes_arrays[1][idim] = -1;
            op_axes_arrays[2][idim] = 0;
        }
        else {
            op_axes_arrays[0][idim] = idim;
            op_axes_arrays[1][idim] = idim;
            op_axes_arrays[2][idim] = -1;
        }
    }

    op[0] = out;
    op[1] = arr;
    op[2] = ind;

    if (out != NULL || ndim > 1 || !PyArray_ISALIGNED(arr) ||
            !PyArray_EquivTypes(descrs[0], PyArray_DESCR(arr))) {
        need_outer_iterator = 1;
    }

    if (need_outer_iterator) {
        PyArray_Descr *op_dtypes[3] = {descrs[0], descrs[1], NULL};

        npy_uint32 flags = NPY_ITER_ZEROSIZE_OK|
                           NPY_ITER_REFS_OK|
                           NPY_ITER_MULTI_INDEX|
                           NPY_ITER_COPY_IF_OVERLAP;

        /*
         * The way reduceat is set up, we can't do buffering,
         * so make a copy instead when necessary using
         * the UPDATEIFCOPY flag
         */

        /* The per-operand flags for the outer loop */
        op_flags[0] = NPY_ITER_READWRITE|
                      NPY_ITER_NO_BROADCAST|
                      NPY_ITER_ALLOCATE|
                      NPY_ITER_NO_SUBTYPE|
                      NPY_ITER_UPDATEIFCOPY|
                      NPY_ITER_ALIGNED;
        op_flags[1] = NPY_ITER_READONLY|
                      NPY_ITER_COPY|
                      NPY_ITER_ALIGNED;
        op_flags[2] = NPY_ITER_READONLY;

        op_dtypes[1] = op_dtypes[0];

        NPY_UF_DBG_PRINT("Allocating outer iterator\n");
        iter = NpyIter_AdvancedNew(3, op, flags,
                                   NPY_KEEPORDER, NPY_UNSAFE_CASTING,
                                   op_flags, op_dtypes,
                                   ndim, op_axes, NULL, 0);
        if (iter == NULL) {
            goto fail;
        }

        /* Remove the inner loop axis from the outer iterator */
        if (NpyIter_RemoveAxis(iter, axis) != NPY_SUCCEED) {
            goto fail;
        }
        if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) {
            goto fail;
        }

        /* In case COPY or UPDATEIFCOPY occurred */
        op[0] = NpyIter_GetOperandArray(iter)[0];
        op[1] = NpyIter_GetOperandArray(iter)[1];
        op[2] = NpyIter_GetOperandArray(iter)[2];

        if (out == NULL) {
            out = op[0];
            Py_INCREF(out);
        }
    }
    else {
        /*
         * Allocate the output for when there's no outer iterator, we always
         * use the outer_iteration path when `out` is passed.
         */
        assert(out == NULL);
        Py_INCREF(descrs[0]);
        op[0] = out = (PyArrayObject *)PyArray_NewFromDescr(
                &PyArray_Type, descrs[0],
                1, &ind_size, NULL, NULL,
                0, NULL);
        if (out == NULL) {
            goto fail;
        }
    }

    npy_intp fixed_strides[3];
    if (need_outer_iterator) {
        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
    }
    else {
        fixed_strides[1] = PyArray_STRIDES(op[1])[axis];
    }
    /* The reduce axis does not advance here in the strided-loop */
    fixed_strides[0] = 0;
    fixed_strides[2] = 0;

    NPY_ARRAYMETHOD_FLAGS flags = 0;
    if (ufuncimpl->get_strided_loop(&context,
            1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
        goto fail;
    }

    if (iter != NULL) {
        flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter));
    }

    int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Start with the floating-point exception flags cleared */
        npy_clear_floatstatus_barrier((char*)&iter);
    }

    /*
     * If the output has zero elements, return now.
     */
    if (PyArray_SIZE(op[0]) == 0) {
        goto finish;
    }

    if (iter && NpyIter_GetIterSize(iter) != 0) {
        char *dataptr_copy[3];
        npy_intp stride_copy[3];

        NpyIter_IterNextFunc *iternext;
        char **dataptr;
        npy_intp count_m1;
        npy_intp stride0, stride1;
        npy_intp stride0_ind = PyArray_STRIDE(op[0], axis);

        int itemsize = descrs[0]->elsize;

        /* Get the variables needed for the loop */
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            goto fail;
        }
        dataptr = NpyIter_GetDataPtrArray(iter);

        /* Execute the loop with just the outer iterator */
        count_m1 = PyArray_DIM(op[1], axis)-1;
        stride0 = 0;
        stride1 = PyArray_STRIDE(op[1], axis);

        NPY_UF_DBG_PRINT("UFunc: Reduce loop with just outer iterator\n");

        stride_copy[0] = stride0;
        stride_copy[1] = stride1;
        stride_copy[2] = stride0;

        if (!needs_api) {
            NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
        }

        do {
            for (i = 0; i < ind_size; ++i) {
                npy_intp start = reduceat_ind[i],
                        end = (i == ind_size-1) ? count_m1+1 :
                                                  reduceat_ind[i+1];
                npy_intp count = end - start;

                dataptr_copy[0] = dataptr[0] + stride0_ind*i;
                dataptr_copy[1] = dataptr[1] + stride1*start;
                dataptr_copy[2] = dataptr[0] + stride0_ind*i;

                /*
                 * Copy the first element to start the reduction.
                 *
                 * Output (dataptr[0]) and input (dataptr[1]) may point
                 * to the same memory, e.g.
                 * np.add.reduceat(a, np.arange(len(a)), out=a).
                 */
                if (descrs[2]->type_num == NPY_OBJECT) {
                    /*
                     * Incref before decref to avoid the possibility of
                     * the reference count being zero temporarily.
                     */
                    Py_XINCREF(*(PyObject **)dataptr_copy[1]);
                    Py_XDECREF(*(PyObject **)dataptr_copy[0]);
                    *(PyObject **)dataptr_copy[0] =
                                        *(PyObject **)dataptr_copy[1];
                }
                else {
                    memmove(dataptr_copy[0], dataptr_copy[1], itemsize);
                }

                if (count > 1) {
                    /* Inner loop like REDUCE */
                    --count;
                    dataptr_copy[1] += stride1;
                    NPY_UF_DBG_PRINT1("iterator loop count %d\n",
                            (int)count);
                    res = strided_loop(&context,
                            dataptr_copy, &count, stride_copy, auxdata);
                }
            }
        } while (res == 0 && iternext(iter));

        NPY_END_THREADS;
    }
    else if (iter == NULL) {
        char *dataptr_copy[3];

        int itemsize = descrs[0]->elsize;

        npy_intp stride0_ind = PyArray_STRIDE(op[0], axis);
        npy_intp stride1 = PyArray_STRIDE(op[1], axis);

        NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n");

        if (!needs_api) {
            NPY_BEGIN_THREADS;
        }

        for (i = 0; i < ind_size; ++i) {
            npy_intp start = reduceat_ind[i],
                    end = (i == ind_size-1) ? PyArray_DIM(arr,axis) :
                                              reduceat_ind[i+1];
            npy_intp count = end - start;

            dataptr_copy[0] = PyArray_BYTES(op[0]) + stride0_ind*i;
            dataptr_copy[1] = PyArray_BYTES(op[1]) + stride1*start;
            dataptr_copy[2] = PyArray_BYTES(op[0]) + stride0_ind*i;

            /*
             * Copy the first element to start the reduction.
             *
             * Output (dataptr[0]) and input (dataptr[1]) may point to
             * the same memory, e.g.
             * np.add.reduceat(a, np.arange(len(a)), out=a).
             */
            if (descrs[2]->type_num == NPY_OBJECT) {
                /*
                 * Incref before decref to avoid the possibility of the
                 * reference count being zero temporarily.
                 */
                Py_XINCREF(*(PyObject **)dataptr_copy[1]);
                Py_XDECREF(*(PyObject **)dataptr_copy[0]);
                *(PyObject **)dataptr_copy[0] =
                                    *(PyObject **)dataptr_copy[1];
            }
            else {
                memmove(dataptr_copy[0], dataptr_copy[1], itemsize);
            }

            if (count > 1) {
                /* Inner loop like REDUCE */
                --count;
                dataptr_copy[1] += stride1;
                NPY_UF_DBG_PRINT1("iterator loop count %d\n",
                        (int)count);
                res = strided_loop(&context,
                        dataptr_copy, &count, fixed_strides, auxdata);
                if (res != 0) {
                    break;
                }
            }
        }

        NPY_END_THREADS;
    }

finish:
    NPY_AUXDATA_FREE(auxdata);
    Py_DECREF(descrs[0]);
    Py_DECREF(descrs[1]);
    Py_DECREF(descrs[2]);

    if (!NpyIter_Deallocate(iter)) {
        res = -1;
    }

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        res = _check_ufunc_fperr(errormask, "reduceat");
    }

    if (res < 0) {
        Py_DECREF(out);
        return NULL;
    }

    return (PyObject *)out;

fail:
    Py_XDECREF(out);

    NPY_AUXDATA_FREE(auxdata);

    Py_XDECREF(descrs[0]);
    Py_XDECREF(descrs[1]);
    Py_XDECREF(descrs[2]);

    NpyIter_Deallocate(iter);

    return NULL;
}
/* Return NPY_TRUE when every element of the tuple `tup` is None. */
static npy_bool
tuple_all_none(PyObject *tup) {
    npy_intp size = PyTuple_GET_SIZE(tup);
    for (npy_intp j = 0; j < size; j++) {
        if (PyTuple_GET_ITEM(tup, j) != Py_None) {
            return NPY_FALSE;
        }
    }
    return NPY_TRUE;
}
/*
 * Normalize a user-supplied `out` argument into `full_args->out`.
 *
 * A tuple of length `nout` is used as-is (unless it is all-None, which
 * counts as "no out"); with a single output a bare object is wrapped
 * into a 1-tuple; None means no out.  Returns 0 on success, -1 with an
 * exception set otherwise.  Stores a new reference in full_args->out.
 */
static int
_set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args)
{
    if (PyTuple_CheckExact(out_obj)) {
        if (PyTuple_GET_SIZE(out_obj) != nout) {
            PyErr_SetString(PyExc_ValueError,
                    "The 'out' tuple must have exactly "
                    "one entry per ufunc output");
            return -1;
        }
        /* An all-None tuple is equivalent to passing no `out` at all. */
        if (!tuple_all_none(out_obj)) {
            Py_INCREF(out_obj);
            full_args->out = out_obj;
        }
        return 0;
    }

    if (nout != 1) {
        PyErr_SetString(PyExc_TypeError,
                nout > 1 ? "'out' must be a tuple of arrays" :
                "'out' must be an array or a tuple with "
                "a single array");
        return -1;
    }

    if (out_obj == Py_None) {
        return 0;
    }
    /* Can be an array if it only has one output */
    full_args->out = PyTuple_Pack(1, out_obj);
    return (full_args->out == NULL) ? -1 : 0;
}
/* forward declaration */
static PyArray_DTypeMeta * _get_dtype(PyObject *dtype_obj);
/*
* This code handles reduce, reduceat, and accumulate
* (accumulate and reduce are special cases of the more general reduceat
* but they are handled separately for speed)
*/
/*
 * Argument parsing and dispatch for `ufunc.reduce`, `ufunc.accumulate`
 * and `ufunc.reduceat` (selected via `operation`).  Handles
 * __array_ufunc__ overrides, converts `axis`/`dtype`/`out`/... and then
 * calls the matching PyUFunc_* implementation, finally wrapping the
 * result.  Returns a new reference or NULL with an exception set.
 *
 * Fix: a failing PyUFunc_CheckOverride previously returned NULL
 * directly, leaking `full_args.in`/`full_args.out`; it now goes through
 * the `fail` cleanup path.
 */
static PyObject *
PyUFunc_GenericReduction(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, int operation)
{
    int i, naxes=0, ndim;
    int axes[NPY_MAXDIMS];

    ufunc_full_args full_args = {NULL, NULL};
    PyObject *axes_obj = NULL;
    PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL;
    PyObject *op = NULL;
    PyArrayObject *indices = NULL;
    PyArray_DTypeMeta *signature[3] = {NULL, NULL, NULL};
    PyArrayObject *out = NULL;
    int keepdims = 0;
    PyObject *initial = NULL;
    npy_bool out_is_passed_by_position;

    static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL};

    if (ufunc == NULL) {
        PyErr_SetString(PyExc_ValueError, "function not supported");
        return NULL;
    }
    if (ufunc->core_enabled) {
        PyErr_Format(PyExc_RuntimeError,
                     "Reduction not defined on ufunc with signature");
        return NULL;
    }
    if (ufunc->nin != 2) {
        PyErr_Format(PyExc_ValueError,
                     "%s only supported for binary functions",
                     _reduce_type[operation]);
        return NULL;
    }
    if (ufunc->nout != 1) {
        PyErr_Format(PyExc_ValueError,
                     "%s only supported for functions "
                     "returning a single value",
                     _reduce_type[operation]);
        return NULL;
    }

    /*
     * Perform argument parsing, but start by only extracting. This is
     * just to preserve the behaviour that __array_ufunc__ did not perform
     * any checks on arguments, and we could change this or change it for
     * certain parameters.
     */
    PyObject *otype_obj = NULL, *out_obj = NULL, *indices_obj = NULL;
    PyObject *keepdims_obj = NULL, *wheremask_obj = NULL;
    npy_bool return_scalar = NPY_TRUE;  /* scalar return is disabled for out=... */
    if (operation == UFUNC_REDUCEAT) {
        NPY_PREPARE_ARGPARSER;

        if (npy_parse_arguments("reduceat", args, len_args, kwnames,
                "array", NULL, &op,
                "indices", NULL, &indices_obj,
                "|axis", NULL, &axes_obj,
                "|dtype", NULL, &otype_obj,
                "|out", NULL, &out_obj,
                NULL, NULL, NULL) < 0) {
            goto fail;
        }
        /* Prepare inputs for PyUfunc_CheckOverride */
        full_args.in = PyTuple_Pack(2, op, indices_obj);
        if (full_args.in == NULL) {
            goto fail;
        }
        out_is_passed_by_position = len_args >= 5;
    }
    else if (operation == UFUNC_ACCUMULATE) {
        NPY_PREPARE_ARGPARSER;

        if (npy_parse_arguments("accumulate", args, len_args, kwnames,
                "array", NULL, &op,
                "|axis", NULL, &axes_obj,
                "|dtype", NULL, &otype_obj,
                "|out", NULL, &out_obj,
                NULL, NULL, NULL) < 0) {
            goto fail;
        }
        /* Prepare input for PyUfunc_CheckOverride */
        full_args.in = PyTuple_Pack(1, op);
        if (full_args.in == NULL) {
            goto fail;
        }
        out_is_passed_by_position = len_args >= 4;
    }
    else {
        NPY_PREPARE_ARGPARSER;

        if (npy_parse_arguments("reduce", args, len_args, kwnames,
                "array", NULL, &op,
                "|axis", NULL, &axes_obj,
                "|dtype", NULL, &otype_obj,
                "|out", NULL, &out_obj,
                "|keepdims", NULL, &keepdims_obj,
                "|initial", &_not_NoValue, &initial,
                "|where", NULL, &wheremask_obj,
                NULL, NULL, NULL) < 0) {
            goto fail;
        }
        /* Prepare input for PyUfunc_CheckOverride */
        full_args.in = PyTuple_Pack(1, op);
        if (full_args.in == NULL) {
            goto fail;
        }
        out_is_passed_by_position = len_args >= 4;
    }

    /* Normalize output for PyUFunc_CheckOverride and conversion. */
    if (out_is_passed_by_position) {
        /* in this branch, out is always wrapped in a tuple. */
        if (out_obj == Py_Ellipsis) {
            PyErr_SetString(PyExc_TypeError,
                    "out=... is only allowed as a keyword argument.");
            goto fail;
        }
        if (out_obj != Py_None) {
            full_args.out = PyTuple_Pack(1, out_obj);
            if (full_args.out == NULL) {
                goto fail;
            }
        }
    }
    else if (out_obj) {
        if (out_obj == Py_Ellipsis) {
            out_obj = NULL;
            return_scalar = NPY_FALSE;
        }
        else if (_set_full_args_out(1, out_obj, &full_args) < 0) {
            goto fail;
        }
        /* Ensure that out_obj is the array, not the tuple: */
        if (full_args.out != NULL) {
            out_obj = PyTuple_GET_ITEM(full_args.out, 0);
        }
    }

    /* We now have all the information required to check for Overrides */
    PyObject *override = NULL;
    int errval = PyUFunc_CheckOverride(ufunc, _reduce_type[operation],
            full_args.in, full_args.out, wheremask_obj, args, len_args, kwnames, &override);
    if (errval) {
        /* Previously returned NULL directly, leaking full_args.{in,out}. */
        goto fail;
    }
    else if (override) {
        Py_XDECREF(full_args.in);
        Py_XDECREF(full_args.out);
        return override;
    }

    /* Finish parsing of all parameters (no matter which reduce-like) */
    if (indices_obj) {
        PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP);

        indices = (PyArrayObject *)PyArray_FromAny(indices_obj,
                indtype, 1, 1, NPY_ARRAY_CARRAY, NULL);
        if (indices == NULL) {
            goto fail;
        }
    }
    if (otype_obj && otype_obj != Py_None) {
        /* Use `_get_dtype` because `dtype` is a DType and not the instance */
        signature[0] = _get_dtype(otype_obj);
        if (signature[0] == NULL) {
            goto fail;
        }
    }
    if (out_obj && _set_out_array(out_obj, &out) < 0) {
        goto fail;
    }
    if (keepdims_obj && !PyArray_PythonPyIntFromInt(keepdims_obj, &keepdims)) {
        goto fail;
    }
    if (wheremask_obj && !_wheremask_converter(wheremask_obj, &wheremask)) {
        goto fail;
    }

    /* Ensure input is an array */
    mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, NULL);
    if (mp == NULL) {
        goto fail;
    }

    ndim = PyArray_NDIM(mp);

    /* Convert the 'axis' parameter into a list of axes */
    if (axes_obj == NULL) {
        /* apply defaults */
        if (ndim == 0) {
            naxes = 0;
        }
        else {
            naxes = 1;
            axes[0] = 0;
        }
    }
    else if (axes_obj == Py_None) {
        /* Convert 'None' into all the axes */
        naxes = ndim;
        for (i = 0; i < naxes; ++i) {
            axes[i] = i;
        }
    }
    else if (PyTuple_Check(axes_obj)) {
        naxes = PyTuple_Size(axes_obj);
        if (naxes < 0 || naxes > NPY_MAXDIMS) {
            PyErr_SetString(PyExc_ValueError,
                    "too many values for 'axis'");
            goto fail;
        }
        for (i = 0; i < naxes; ++i) {
            PyObject *tmp = PyTuple_GET_ITEM(axes_obj, i);
            int axis = PyArray_PyIntAsInt(tmp);
            if (error_converting(axis)) {
                goto fail;
            }
            if (check_and_adjust_axis(&axis, ndim) < 0) {
                goto fail;
            }
            axes[i] = (int)axis;
        }
    }
    else {
        /* Try to interpret axis as an integer */
        int axis = PyArray_PyIntAsInt(axes_obj);
        /* TODO: PyNumber_Index would be good to use here */
        if (error_converting(axis)) {
            goto fail;
        }
        /*
         * As a special case for backwards compatibility in 'sum',
         * 'prod', et al, also allow a reduction for scalars even
         * though this is technically incorrect.
         */
        if (ndim == 0 && (axis == 0 || axis == -1)) {
            naxes = 0;
        }
        else if (check_and_adjust_axis(&axis, ndim) < 0) {
            goto fail;
        }
        else {
            axes[0] = (int)axis;
            naxes = 1;
        }
    }

    switch(operation) {
        case UFUNC_REDUCE:
            ret = PyUFunc_Reduce(ufunc,
                    mp, out, naxes, axes, signature, keepdims, initial, wheremask);
            Py_XSETREF(wheremask, NULL);
            break;
        case UFUNC_ACCUMULATE:
            if (ndim == 0) {
                PyErr_SetString(PyExc_TypeError, "cannot accumulate on a scalar");
                goto fail;
            }
            if (naxes != 1) {
                PyErr_SetString(PyExc_ValueError,
                        "accumulate does not allow multiple axes");
                goto fail;
            }
            ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc,
                    mp, out, axes[0], signature);
            break;
        case UFUNC_REDUCEAT:
            if (ndim == 0) {
                PyErr_SetString(PyExc_TypeError, "cannot reduceat on a scalar");
                goto fail;
            }
            if (naxes != 1) {
                PyErr_SetString(PyExc_ValueError,
                        "reduceat does not allow multiple axes");
                goto fail;
            }
            ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc,
                    mp, indices, out, axes[0], signature);
            Py_SETREF(indices, NULL);
            break;
    }
    if (ret == NULL) {
        goto fail;
    }

    Py_XDECREF(out);
    Py_DECREF(signature[0]);
    Py_DECREF(signature[1]);
    Py_DECREF(signature[2]);

    Py_DECREF(mp);
    Py_XDECREF(full_args.in);
    Py_XDECREF(full_args.out);

    /* Wrap and return the output */
    PyObject *wrap, *wrap_type;
    if (npy_find_array_wrap(1, &op, &wrap, &wrap_type) < 0) {
        Py_DECREF(ret);
        return NULL;
    }

    /* TODO: Data is mutated, so force_wrap like a normal ufunc call does */
    PyObject *wrapped_result = npy_apply_wrap(
            (PyObject *)ret, out_obj, wrap, wrap_type, NULL,
            PyArray_NDIM(ret) == 0 && return_scalar, NPY_FALSE);
    Py_DECREF(ret);
    Py_DECREF(wrap);
    Py_DECREF(wrap_type);
    return wrapped_result;

fail:
    Py_XDECREF(out);
    Py_XDECREF(signature[0]);
    Py_XDECREF(signature[1]);
    Py_XDECREF(signature[2]);

    Py_XDECREF(mp);
    Py_XDECREF(wheremask);
    Py_XDECREF(indices);
    Py_XDECREF(full_args.in);
    Py_XDECREF(full_args.out);
    return NULL;
}
/*
* Perform a basic check on `dtype`, `sig`, and `signature` since only one
* may be set. If `sig` is used, writes it into `out_signature` (which should
* be set to `signature_obj` so that following code only requires to handle
* `signature_obj`).
*
* Does NOT incref the output! This only copies the borrowed references
* gotten during the argument parsing.
*
* This function does not do any normalization of the input dtype tuples,
* this happens after the array-ufunc override check currently.
*/
static int
_check_and_copy_sig_to_signature(
        PyObject *sig_obj, PyObject *signature_obj, PyObject *dtype,
        PyObject **out_signature)
{
    /* Start from `signature` when given; references stay borrowed. */
    *out_signature = signature_obj;

    if (sig_obj != NULL) {
        if (*out_signature != NULL) {
            /* Both aliases given: reset the output and reject. */
            *out_signature = NULL;
            PyErr_SetString(PyExc_TypeError,
                    "cannot specify both 'sig' and 'signature'");
            return -1;
        }
        *out_signature = sig_obj;
    }

    if (dtype != NULL && *out_signature != NULL) {
        PyErr_SetString(PyExc_TypeError,
                "cannot specify both 'signature' and 'dtype'");
        return -1;
    }
    /* A lone `dtype` needs conversion, delayed until after the override check */
    return 0;
}
/*
* Note: This function currently lets DType classes pass, but in general
* the class (not the descriptor instance) is the preferred input, so the
 * parsing should eventually be adapted to prefer classes and possibly
 * deprecate instances. (Users should not notice that much, since `np.float64`
* or "float64" usually denotes the DType class rather than the instance.)
*/
static PyArray_DTypeMeta *
_get_dtype(PyObject *dtype_obj) {
    /* A DType class passes straight through with a fresh reference. */
    if (PyObject_TypeCheck(dtype_obj, &PyArrayDTypeMeta_Type)) {
        Py_INCREF(dtype_obj);
        return (PyArray_DTypeMeta *)dtype_obj;
    }

    /* Anything else must convert to a descriptor first. */
    PyArray_Descr *descr = NULL;
    if (!PyArray_DescrConverter(dtype_obj, &descr)) {
        return NULL;
    }
    PyArray_DTypeMeta *out = NPY_DTYPE(descr);

    if (NPY_UNLIKELY(!NPY_DT_is_legacy(out))) {
        /* TODO: this path was unreachable when added. */
        PyErr_SetString(PyExc_TypeError,
                "Cannot pass a new user DType instance to the `dtype` or "
                "`signature` arguments of ufuncs. Pass the DType class "
                "instead.");
        Py_DECREF(descr);
        return NULL;
    }

    if (NPY_UNLIKELY(out->singleton != descr)) {
        /* This does not warn about `metadata`, but units is important. */
        if (out->singleton == NULL
                || !PyArray_EquivTypes(out->singleton, descr)) {
            PyErr_SetString(PyExc_TypeError,
                    "The `dtype` and `signature` arguments to "
                    "ufuncs only select the general DType and not details "
                    "such as the byte order or time unit. "
                    "You can avoid this error by using the scalar types "
                    "`np.float64` or the dtype string notation.");
            Py_DECREF(descr);
            return NULL;
        }
    }

    /* Return the DType class of the descriptor (new reference). */
    Py_INCREF(out);
    Py_DECREF(descr);
    return out;
}
/*
* Finish conversion parsing of the DType signature. NumPy always only
* honored the type number for passed in descriptors/dtypes.
* The `dtype` argument is interpreted as the first output DType (not
* descriptor).
* Unlike the dtype of an `out` array, it influences loop selection!
*
 * It is the caller's responsibility to clean `signature` and NULL it before
* calling.
*/
static int
_get_fixed_signature(PyUFuncObject *ufunc,
        PyObject *dtype_obj, PyObject *signature_obj,
        PyArray_DTypeMeta **signature)
{
    /* Nothing fixed by the user: leave `signature` untouched (all NULLs). */
    if (dtype_obj == NULL && signature_obj == NULL) {
        return 0;
    }
    int nin = ufunc->nin, nout = ufunc->nout, nop = nin + nout;
    /* `dtype` fixes all output DTypes to the same class. */
    if (dtype_obj != NULL) {
        if (dtype_obj == Py_None) {
            /* If `dtype=None` is passed, no need to do anything */
            return 0;
        }
        if (nout == 0) {
            /* This may be allowed (NumPy does not do this)? */
            PyErr_SetString(PyExc_TypeError,
                    "Cannot provide `dtype` when a ufunc has no outputs");
            return -1;
        }
        PyArray_DTypeMeta *dtype = _get_dtype(dtype_obj);
        if (dtype == NULL) {
            return -1;
        }
        /* Each output slot gets its own reference to the same DType. */
        for (int i = nin; i < nop; i++) {
            Py_INCREF(dtype);
            signature[i] = dtype;
        }
        Py_DECREF(dtype);
        return 0;
    }
    assert(signature_obj != NULL);
    /* Fill in specified_types from the tuple or string (signature_obj) */
    if (PyTuple_Check(signature_obj)) {
        Py_ssize_t n = PyTuple_GET_SIZE(signature_obj);
        if (n == 1 && nop != 1) {
            /*
             * Special handling, because we deprecate this path. The path
             * probably mainly existed since the `dtype=obj` was passed through
             * as `(obj,)` and parsed later.
             */
            if (PyTuple_GET_ITEM(signature_obj, 0) == Py_None) {
                PyErr_SetString(PyExc_TypeError,
                        "a single item type tuple cannot contain None.");
                return -1;
            }
            PyErr_SetString(PyExc_TypeError,
                    "Use `dtype` or fill the tuple with more than one 'None'.");
            return -1;
        }
        if (n != nop) {
            PyErr_Format(PyExc_ValueError,
                    "a type-tuple must be specified of length %d for ufunc '%s'",
                    nop, ufunc_get_name_cstr(ufunc));
            return -1;
        }
        /* `None` entries leave the corresponding slot unconstrained. */
        for (int i = 0; i < nop; ++i) {
            PyObject *item = PyTuple_GET_ITEM(signature_obj, i);
            if (item == Py_None) {
                continue;
            }
            else {
                signature[i] = _get_dtype(item);
                if (signature[i] == NULL) {
                    return -1;
                }
                else if (i < nin && NPY_DT_is_abstract(signature[i])) {
                    /*
                     * We reject abstract input signatures for now. These
                     * can probably be defined by finding the common DType with
                     * the actual input and using the result of this for the
                     * promotion.
                     */
                    PyErr_SetString(PyExc_TypeError,
                            "Input DTypes to the signature must not be "
                            "abstract. The behaviour may be defined in the "
                            "future.");
                    return -1;
                }
            }
        }
    }
    else if (PyBytes_Check(signature_obj) || PyUnicode_Check(signature_obj)) {
        /* String form, e.g. "dd->d"; bytes are decoded to unicode first. */
        PyObject *str_object = NULL;
        if (PyBytes_Check(signature_obj)) {
            str_object = PyUnicode_FromEncodedObject(signature_obj, NULL, NULL);
            if (str_object == NULL) {
                return -1;
            }
        }
        else {
            Py_INCREF(signature_obj);
            str_object = signature_obj;
        }
        Py_ssize_t length;
        const char *str = PyUnicode_AsUTF8AndSize(str_object, &length);
        if (str == NULL) {
            Py_DECREF(str_object);
            return -1;
        }
        /* Must be a lone typecode, or nin codes, "->", then nout codes. */
        if (length != 1 && (length != nin+nout + 2 ||
                            str[nin] != '-' || str[nin+1] != '>')) {
            PyErr_Format(PyExc_ValueError,
                    "a type-string for %s, %d typecode(s) before and %d after "
                    "the -> sign", ufunc_get_name_cstr(ufunc), nin, nout);
            Py_DECREF(str_object);
            return -1;
        }
        if (length == 1 && nin+nout != 1) {
            Py_DECREF(str_object);
            PyErr_SetString(PyExc_TypeError,
                    "Use `dtype` or fill the tuple with more than one 'None'.");
            return -1;
        }
        else {
            for (int i = 0; i < nin+nout; ++i) {
                /* Skip over the two "->" characters for output codes. */
                npy_intp istr = i < nin ? i : i+2;
                PyArray_Descr *descr = PyArray_DescrFromType(str[istr]);
                if (descr == NULL) {
                    Py_DECREF(str_object);
                    return -1;
                }
                /* Only the DType class is kept, not the descriptor itself. */
                signature[i] = NPY_DTYPE(descr);
                Py_INCREF(signature[i]);
                Py_DECREF(descr);
            }
            Py_DECREF(str_object);
        }
    }
    else {
        PyErr_SetString(PyExc_TypeError,
                "the signature object to ufunc must be a string or a tuple.");
        return -1;
    }
    return 0;
}
/*
* Fill in the actual descriptors used for the operation. This function
* supports falling back to the legacy `ufunc->type_resolver`.
*
* We guarantee the array-method that all passed in descriptors are of the
* correct DType instance (i.e. a string can just fetch the length, it doesn't
* need to "cast" to string first).
*/
static int
resolve_descriptors(int nop,
        PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl,
        PyArrayObject *operands[], PyArray_Descr *dtypes[],
        PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[],
        PyObject *inputs_tup, NPY_CASTING casting)
{
    /* Returns 0 on success, -1 on error (with an exception set). */
    int retval = -1;
    NPY_CASTING safety;
    int n_cleanup = 0;  /* number of original_descrs filled (to XDECREF) */
    PyArray_Descr *original_descrs[NPY_MAXARGS];
    NPY_UF_DBG_PRINT("Resolving the descriptors\n");
    if (NPY_UNLIKELY(ufuncimpl->resolve_descriptors_with_scalars != NULL)) {
        /*
         * Allow a somewhat more powerful approach which:
         * 1. Has access to scalars (currently only ever Python ones)
         * 2. Can in principle customize `PyArray_CastDescrToDType()`
         *    (also because we want to avoid calling it for the scalars).
         */
        int nin = ufunc->nin;
        PyObject *input_scalars[NPY_MAXARGS];
        for (int i = 0; i < nop; i++) {
            if (operands[i] == NULL) {
                original_descrs[i] = NULL;
            }
            else {
                /* For abstract DTypes, we might want to change what this is */
                original_descrs[i] = PyArray_DTYPE(operands[i]);
                Py_INCREF(original_descrs[i]);
            }
            /*
             * Check whether something is a scalar of the given type.
             * We leave it to resolve_descriptors_with_scalars to deal
             * with, e.g., only doing something special for python scalars.
             */
            if (i < nin && inputs_tup != NULL) {
                PyObject *input = PyTuple_GET_ITEM(inputs_tup, i);
                input_scalars[i] = signature[i]->scalar_type == Py_TYPE(input) ?
                    input : NULL;
            }
            else {
                input_scalars[i] = NULL;
            }
        }
        /* All slots (including NULLs) are now owned; clean them all later. */
        n_cleanup = nop;
        npy_intp view_offset = NPY_MIN_INTP;  /* currently ignored */
        safety = ufuncimpl->resolve_descriptors_with_scalars(
            ufuncimpl, signature, original_descrs, input_scalars,
            dtypes, &view_offset
        );
        /* For scalars, replace the operand if needed (scalars can't be out) */
        for (int i = 0; i < nin; i++) {
            if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) {
                /* `resolve_descriptors_with_scalars` decides the descr */
                if (npy_update_operand_for_scalar(
                        &operands[i], input_scalars[i], dtypes[i],
                        /* ignore cast safety for this op (resolvers job) */
                        NPY_SAFE_CASTING) < 0) {
                    goto finish;
                }
            }
        }
        goto check_safety;
    }
    /* Standard path: cast each operand descriptor to its signature DType. */
    for (int i = 0; i < nop; ++i) {
        if (operands[i] == NULL) {
            original_descrs[i] = NULL;
            continue;
        }
        PyArray_Descr *descr = PyArray_DTYPE(operands[i]);
        /*
         * If we are working with Python literals/scalars, deal with them.
         * If needed, we create new array with the right descriptor.
         */
        if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) {
            PyObject *input;
            if (inputs_tup == NULL) {
                input = NULL;
            }
            else {
                input = PyTuple_GET_ITEM(inputs_tup, i);
            }
            PyArray_Descr *new_descr = npy_find_descr_for_scalar(
                    input, descr, original_DTypes[i], signature[i]);
            if (new_descr == NULL) {
                goto finish;
            }
            int res = npy_update_operand_for_scalar(
                    &operands[i], input, new_descr, casting);
            Py_DECREF(new_descr);
            if (res < 0) {
                goto finish;
            }
            /* Descriptor may have been modified along the way */
            descr = PyArray_DESCR(operands[i]);
        }
        /*
         * The dtype may mismatch the signature, in which case we need
         * to make it fit before calling the resolution.
         */
        original_descrs[i] = PyArray_CastDescrToDType(descr, signature[i]);
        if (original_descrs[i] == NULL) {
            goto finish;
        }
        n_cleanup += 1;
    }
    if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) {
        /* The default: use the `ufuncimpl` as nature intended it */
        npy_intp view_offset = NPY_MIN_INTP;  /* currently ignored */
        safety = ufuncimpl->resolve_descriptors(ufuncimpl,
                signature, original_descrs, dtypes, &view_offset);
        goto check_safety;
    }
    else {
        /*
         * Fall-back to legacy resolver using `operands`, used exclusively
         * for datetime64/timedelta64 and custom ufuncs (in pyerfa/astropy).
         */
        retval = ufunc->type_resolver(ufunc, casting, operands, NULL, dtypes);
        goto finish;
    }
  check_safety:
    /* Negative safety indicates an error raised by the resolver. */
    if (safety < 0) {
        goto finish;
    }
    if (NPY_UNLIKELY(PyArray_MinCastSafety(safety, casting) != casting)) {
        /* TODO: Currently impossible to reach (specialized unsafe loop) */
        PyErr_Format(PyExc_TypeError,
                "The ufunc implementation for %s with the given dtype "
                "signature is not possible under the casting rule %s",
                ufunc_get_name_cstr(ufunc), npy_casting_to_string(casting));
        goto finish;
    }
    retval = 0;
  finish:
    for (int i = 0; i < n_cleanup; i++) {
        Py_XDECREF(original_descrs[i]);
    }
    return retval;
}
/**
* Wraps all outputs and returns the result (which may be NULL on error).
*
* Use __array_wrap__ on all outputs
* if present on one of the input arguments.
* If present for multiple inputs:
* use __array_wrap__ of input object with largest
* __array_priority__ (default = 0.0)
*
* Exception: we should not wrap outputs for items already
* passed in as output-arguments. These items should either
* be left unwrapped or wrapped by calling their own __array_wrap__
* routine.
*
* For each output argument, wrap will be either
* NULL --- call PyArray_Return() -- default if no output arguments given
* None --- array-object passed in don't call PyArray_Return
* method --- the __array_wrap__ method to call.
*
* @param ufunc The universal function to be wrapped
* @param full_args Original inputs and outputs
* @param subok Whether subclasses are allowed
* @param result_arrays The ufunc result(s). REFERENCES ARE STOLEN!
* @param return_scalar Set to NPY_FALSE (out=...) to ensure array return.
*/
static PyObject *
replace_with_wrapped_result_and_return(PyUFuncObject *ufunc,
        ufunc_full_args full_args, npy_bool subok,
        PyArrayObject *result_arrays[], npy_bool return_scalar)
{
    PyObject *result = NULL;
    /*
     * Initialize to NULL so the `fail` path can safely Py_XDECREF them:
     * if `npy_find_array_wrap` errors out it may not have set the output
     * pointers, and the previous Py_DECREF of uninitialized pointers could
     * crash.
     */
    PyObject *wrap = NULL, *wrap_type = NULL;
    if (!subok) {
        /* subok=False ignores input wrapping (but not output) */
        Py_INCREF(Py_None);
        wrap = Py_None;
        Py_INCREF(&PyArray_Type);
        wrap_type = (PyObject *)&PyArray_Type;
    }
    else if (npy_find_array_wrap(
            ufunc->nin, PySequence_Fast_ITEMS(full_args.in),
            &wrap, &wrap_type) < 0) {
        goto fail;
    }
    /* wrap outputs */
    NpyUFuncContext context = {
            .ufunc = (PyObject *)ufunc,
            .in = full_args.in, .out = full_args.out};
    /* Multi-output ufuncs return a tuple; single-output returns the object. */
    if (ufunc->nout != 1) {
        result = PyTuple_New(ufunc->nout);
        if (result == NULL) {
            goto fail;
        }
    }
    for (int out_i = 0; out_i < ufunc->nout; out_i++) {
        context.out_i = out_i;
        PyObject *original_out = NULL;
        if (full_args.out) {
            original_out = PyTuple_GET_ITEM(full_args.out, out_i);
        }
        PyObject *ret_i = npy_apply_wrap(
                (PyObject *)result_arrays[out_i], original_out, wrap, wrap_type,
                /* Always try to return a scalar right now: */
                &context, PyArray_NDIM(result_arrays[out_i]) == 0 && return_scalar, NPY_TRUE);
        /* References to the result arrays are stolen: clear each slot. */
        Py_CLEAR(result_arrays[out_i]);
        if (ret_i == NULL) {
            goto fail;
        }
        /* When we are not returning a tuple, this is the result: */
        if (result == NULL) {
            result = ret_i;
            goto finish;
        }
        PyTuple_SET_ITEM(result, out_i, ret_i);
    }
  finish:
    Py_DECREF(wrap);
    Py_DECREF(wrap_type);
    return result;
  fail:
    /* Fail path ensures result_arrays are fully cleared */
    Py_XDECREF(result);
    Py_XDECREF(wrap);
    Py_XDECREF(wrap_type);
    for (int i = 0; i < ufunc->nout; i++) {
        Py_CLEAR(result_arrays[i]);
    }
    return NULL;
}
/*
* Check whether the input object is a known scalar and whether the ufunc has
* a suitable inner loop for it, which takes and returns the data type of the
* input (this function is not called if output or any other argument was given).
* If a loop was found, call it and store the result.
*
* Returns -2 if a short-cut is not possible, 0 on success and -1 on error.
*/
static int
try_trivial_scalar_call(
        PyUFuncObject *ufunc, PyObject *const obj, PyObject **result)
{
    assert(ufunc->nin == 1 && ufunc->nout == 1 && !ufunc->core_enabled);
    npy_clongdouble cin, cout;  // aligned storage, using longest type.
    char *in = (char *)&cin, *out = (char *)&cout;
    char *data[] = {in, out};
    int ret = -2;  /* default: bail out, let the generic machinery handle it */
    PyArray_Descr *dt;
    /*
     * For supported input, get input pointer and descriptor. Otherwise, bail.
     */
    if (obj == Py_False || obj == Py_True) {
        *(npy_bool *)in = (obj == Py_True);
        dt = PyArray_DescrFromType(NPY_BOOL);
    }
    else if (PyFloat_CheckExact(obj)) {
        *(double *)in = PyFloat_AS_DOUBLE(obj);
        dt = PyArray_DescrFromType(NPY_FLOAT64);
    }
    else if (PyLong_CheckExact(obj)) {
        int overflow;
        npy_intp val = PyLong_AsLongAndOverflow(obj, &overflow);
        if (overflow) {
            return -2;  // bail, main code perhaps deals with this.
        }
        if (error_converting(val)) {
            return -1;  // should never happen; pass on it if does.
        }
        *(npy_intp *)in = val;
        dt = PyArray_DescrFromType(NPY_INTP);
    }
    else if (PyComplex_CheckExact(obj)) {
        Py_complex oop = PyComplex_AsCComplex(obj);
        if (error_converting(oop.real)) {
            return -1;  // should never happen; pass on it if does.
        }
        /* Store real/imag parts contiguously as a complex128 element. */
        *(double *)in = oop.real;
        *(double *)(in+sizeof(double)) = oop.imag;
        dt = PyArray_DescrFromType(NPY_COMPLEX128);
    }
    else if (is_anyscalar_exact(obj)) {
        dt = PyArray_DescrFromScalar(obj);
        /* NOTE(review): `dt` is not NULL-checked here before use — confirm
         * PyArray_DescrFromScalar cannot fail for an exact NumPy scalar. */
        if (!PyDataType_ISNUMBER(dt)) {
            goto bail;
        }
        data[0] = scalar_value(obj, dt);
    }
    else {
        return -2;
    }
    /*
     * Check the ufunc supports our descriptor, bailing (return -2) if not.
     */
    // Try getting info from the (private) cache. Fall back if not found,
    // so that the the dtype gets registered and things will work next time.
    PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL};
    PyObject *info = PyArrayIdentityHash_GetItem(  // borrowed reference.
        (PyArrayIdentityHash *)ufunc->_dispatch_cache,
        (PyObject **)op_dtypes);
    if (info == NULL) {
        goto bail;
    }
    // Check actual dtype is correct (can be wrong with promotion).
    PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
    if ((PyTuple_GET_ITEM(all_dtypes, 0) != (PyObject *)NPY_DTYPE(dt)) ||
            (PyTuple_GET_ITEM(all_dtypes, 1) != (PyObject *)NPY_DTYPE(dt))) {
        goto bail;
    }
    // Get method, bailing if not an arraymethod (e.g., a promotor).
    PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
    if (!PyObject_TypeCheck(method, &PyArrayMethod_Type)) {
        goto bail;
    }
    // Get loop, requiring that the output and input dtype are the same.
    PyArrayMethod_Context context;
    PyArray_Descr *descrs[2] = {dt, dt};
    NPY_context_init(&context, descrs);
    context.caller = (PyObject *)ufunc;
    context.method = method;
    npy_intp strides[2] = {0, 0};  // 0 ensures scalar math, not SIMD for half.
    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;
    NPY_ARRAYMETHOD_FLAGS flags = 0;
    if (method->get_strided_loop(&context, 1, 0, strides,
                                 &strided_loop, &auxdata, &flags) < 0) {
        ret = -1;  // Should not happen, so raise error if it does anyway.
        goto bail;
    }
    /*
     * Call loop with single element, checking floating point errors.
     */
    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        npy_clear_floatstatus();
    }
    npy_intp n = 1;
    ret = strided_loop(&context, data, &n, strides, auxdata);
    NPY_AUXDATA_FREE(auxdata);
    if (ret == 0) {
        if (PyErr_Occurred()) {
            ret = -1;
            goto bail;
        }
        if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
            // Check for any unmasked floating point errors (note: faster
            // than _check_ufunc_fperr as one doesn't need mask up front).
            int fpe_errors = npy_get_floatstatus();
            if (fpe_errors) {
                if (PyUFunc_GiveFloatingpointErrors(
                        ufunc_get_name_cstr(ufunc), fpe_errors) < 0) {
                    ret = -1;  // Real error, falling back would not help.
                    goto bail;
                }
            }
        }
        /* Box the raw output element as a NumPy scalar of descriptor `dt`. */
        *result = PyArray_Scalar(out, dt, NULL);
        if (*result == NULL) {
            ret = -1;  // Real error (should never happen).
        }
    }
  bail:
    Py_DECREF(dt);
    return ret;
}
/*
* Main ufunc call implementation.
*
* This implementation makes use of the "fastcall" way of passing keyword
* arguments and is called directly from `ufunc_generic_vectorcall` when
* Python has `tp_vectorcall` (Python 3.8+).
* If `tp_vectorcall` is not available, the dictionary `kwargs` are unpacked in
* `ufunc_generic_call` with fairly little overhead.
*/
/*
 * Fixes in this revision: the user-facing `where`-without-`out` warning
 * misspelled "uninitialized", and a comment typo ("returnin").
 * Control flow and all other behavior are unchanged.
 */
static PyObject *
ufunc_generic_fastcall(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames,
        npy_bool outer)
{
    int errval;
    int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs;
    if (len_args == 1 && kwnames == NULL && !PyArray_Check(args[0])
            && nin == 1 && nout == 1 && !ufunc->core_enabled) {
        // Possibly scalar input, try the fast path, falling back on failure.
        PyObject *result = NULL;
        if (try_trivial_scalar_call(ufunc, args[0], &result) != -2) {
            return result;
        }
    }
    /* All following variables are cleared in the `fail` error path */
    ufunc_full_args full_args = {NULL, NULL};
    PyArrayObject *wheremask = NULL;
    /*
     * Scratch space for operands, dtypes, etc. Note that operands and
     * operation_descrs may hold an entry for the wheremask.
     */
    NPY_ALLOC_WORKSPACE(scratch_objs, void *, UFUNC_STACK_NARGS * 4 + 2, nop * 4 + 2);
    if (scratch_objs == NULL) {
        return NULL;
    }
    memset(scratch_objs, 0, sizeof(void *) * (nop * 4 + 2));
    PyArray_DTypeMeta **signature = (PyArray_DTypeMeta **)scratch_objs;
    PyArrayObject **operands = (PyArrayObject **)(signature + nop);
    PyArray_DTypeMeta **operand_DTypes = (PyArray_DTypeMeta **)(operands + nop + 1);
    PyArray_Descr **operation_descrs = (PyArray_Descr **)(operand_DTypes + nop);
    /*
     * Note that the input (and possibly output) arguments are passed in as
     * positional arguments. We extract these first and check for `out`
     * passed by keyword later.
     * Outputs and inputs are stored in `full_args.in` and `full_args.out`
     * as tuples (or NULL when no outputs are passed).
     */
    /* Check number of arguments */
    if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) {
        const char *verb = (len_args == 1) ? "was" : "were";
        PyErr_Format(PyExc_TypeError,
                "%s() takes from %d to %d positional arguments but "
                "%zd %s given",
                ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb);
        goto fail;
    }
    /* Fetch input arguments. */
    full_args.in = PyArray_TupleFromItems(ufunc->nin, args, 0);
    if (full_args.in == NULL) {
        goto fail;
    }
    /*
     * If there are more arguments, they define the out args. Otherwise
     * full_args.out is NULL for now, and the `out` kwarg may still be passed.
     */
    npy_bool out_is_passed_by_position = len_args > nin;
    if (out_is_passed_by_position) {
        npy_bool all_none = NPY_TRUE;
        full_args.out = PyTuple_New(nout);
        if (full_args.out == NULL) {
            goto fail;
        }
        for (int i = nin; i < nop; i++) {
            PyObject *tmp;
            if (i < (int)len_args) {
                tmp = args[i];
                if (tmp == Py_Ellipsis) {
                    PyErr_SetString(PyExc_TypeError,
                        "out=... is only allowed as a keyword argument.");
                    goto fail;
                }
                if (tmp != Py_None) {
                    all_none = NPY_FALSE;
                }
            }
            else {
                tmp = Py_None;
            }
            Py_INCREF(tmp);
            PyTuple_SET_ITEM(full_args.out, i-nin, tmp);
        }
        /* Extra positional args but no keywords */
        /* DEPRECATED NumPy 2.4, 2025-08 */
        if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) {
            if (DEPRECATE(
                    "Passing more than 2 positional arguments to np.maximum and np.minimum "
                    "is deprecated. If you meant to use the third argument as an output, "
                    "use the `out` keyword argument instead. If you hoped to work with "
                    "more than 2 inputs, combine them into a single array and get the extrema "
                    "for the relevant axis.") < 0) {
                goto fail;
            }
        }
        if (all_none) {
            Py_SETREF(full_args.out, NULL);
        }
    }
    else {
        full_args.out = NULL;
    }
    /*
     * We have now extracted (but not converted) the input arguments.
     * To simplify overrides, extract all other arguments (as objects only)
     */
    PyObject *out_obj = NULL, *where_obj = NULL;
    PyObject *axes_obj = NULL, *axis_obj = NULL;
    PyObject *keepdims_obj = NULL, *casting_obj = NULL, *order_obj = NULL;
    PyObject *subok_obj = NULL, *signature_obj = NULL, *sig_obj = NULL;
    PyObject *dtype_obj = NULL;
    /* Typically, NumPy defaults to returning scalars for 0-D results */
    npy_bool return_scalar = NPY_TRUE;
    /* Skip parsing if there are no keyword arguments, nothing left to do */
    if (kwnames != NULL) {
        if (!ufunc->core_enabled) {
            NPY_PREPARE_ARGPARSER;
            if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames,
                    "$out", NULL, &out_obj,
                    "$where", NULL, &where_obj,
                    "$casting", NULL, &casting_obj,
                    "$order", NULL, &order_obj,
                    "$subok", NULL, &subok_obj,
                    "$dtype", NULL, &dtype_obj,
                    "$signature", NULL, &signature_obj,
                    "$sig", NULL, &sig_obj,
                    NULL, NULL, NULL) < 0) {
                goto fail;
            }
        }
        else {
            /* Generalized ufuncs additionally accept axis/axes/keepdims. */
            NPY_PREPARE_ARGPARSER;
            if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames,
                    "$out", NULL, &out_obj,
                    "$axes", NULL, &axes_obj,
                    "$axis", NULL, &axis_obj,
                    "$keepdims", NULL, &keepdims_obj,
                    "$casting", NULL, &casting_obj,
                    "$order", NULL, &order_obj,
                    "$subok", NULL, &subok_obj,
                    "$dtype", NULL, &dtype_obj,
                    "$signature", NULL, &signature_obj,
                    "$sig", NULL, &sig_obj,
                    NULL, NULL, NULL) < 0) {
                goto fail;
            }
            if (NPY_UNLIKELY((axes_obj != NULL) && (axis_obj != NULL))) {
                PyErr_SetString(PyExc_TypeError,
                        "cannot specify both 'axis' and 'axes'");
                goto fail;
            }
        }
        /* Handle `out` arguments passed by keyword */
        if (out_obj != NULL) {
            if (out_is_passed_by_position) {
                PyErr_SetString(PyExc_TypeError,
                        "cannot specify 'out' as both a "
                        "positional and keyword argument");
                goto fail;
            }
            if (out_obj == Py_Ellipsis) {
                return_scalar = NPY_FALSE;
            }
            else if (_set_full_args_out(nout, out_obj, &full_args) < 0) {
                goto fail;
            }
        }
        /*
         * Only one of signature, sig, and dtype should be passed. If `sig`
         * was passed, this puts it into `signature_obj` instead (these
         * are borrowed references).
         */
        if (_check_and_copy_sig_to_signature(
                sig_obj, signature_obj, dtype_obj, &signature_obj) < 0) {
            goto fail;
        }
    }
    char *method;
    if (!outer) {
        method = "__call__";
    }
    else {
        method = "outer";
    }
    /* We now have all the information required to check for Overrides */
    PyObject *override = NULL;
    errval = PyUFunc_CheckOverride(ufunc, method,
            full_args.in, full_args.out, where_obj,
            args, len_args, kwnames, &override);
    if (errval) {
        goto fail;
    }
    else if (override) {
        Py_DECREF(full_args.in);
        Py_XDECREF(full_args.out);
        return override;
    }
    /* Warn if "where" is used without "out", issue 29561 */
    if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) {
        if (PyErr_WarnEx(PyExc_UserWarning,
                "'where' used without 'out', expect uninitialized memory in output. "
                "If this is intentional, use out=None.", 1) < 0) {
            goto fail;
        }
    }
    if (outer) {
        /* Outer uses special preparation of inputs (expand dims) */
        PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc);
        if (new_in == NULL) {
            goto fail;
        }
        Py_SETREF(full_args.in, new_in);
    }
    /*
     * Parse the passed `dtype` or `signature` into an array containing
     * PyArray_DTypeMeta and/or None.
     */
    if (_get_fixed_signature(ufunc,
            dtype_obj, signature_obj, signature) < 0) {
        goto fail;
    }
    NPY_ORDER order = NPY_KEEPORDER;
    NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
    npy_bool subok = NPY_TRUE;
    int keepdims = -1;  /* We need to know if it was passed */
    npy_bool force_legacy_promotion;
    npy_bool promoting_pyscalars;
    if (convert_ufunc_arguments(ufunc,
            /* extract operand related information: */
            full_args, operands,
            operand_DTypes,
            &force_legacy_promotion,
            &promoting_pyscalars,
            /* extract general information: */
            order_obj, &order,
            casting_obj, &casting,
            subok_obj, &subok,
            where_obj, &wheremask,
            keepdims_obj, &keepdims) < 0) {
        goto fail;
    }
    /*
     * Note that part of the promotion is to the complete the signature
     * (until here it only represents the fixed part and is usually NULLs).
     *
     * After promotion, we could push the following logic into the ArrayMethod
     * in the future. For now, we do it here. The type resolution step can
     * be shared between the ufunc and gufunc code.
     */
    PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
            operands, signature,
            operand_DTypes, force_legacy_promotion,
            promoting_pyscalars, NPY_FALSE);
    if (ufuncimpl == NULL) {
        goto fail;
    }
    /* Find the correct descriptors for the operation */
    if (resolve_descriptors(nop, ufunc, ufuncimpl,
            operands, operation_descrs, signature, operand_DTypes,
            full_args.in, casting) < 0) {
        goto fail;
    }
    /*
     * Do the final preparations and call the inner-loop.
     */
    if (!ufunc->core_enabled) {
        errval = PyUFunc_GenericFunctionInternal(ufunc, ufuncimpl,
                operation_descrs, operands, casting, order,
                wheremask);
    }
    else {
        errval = PyUFunc_GeneralizedFunctionInternal(ufunc, ufuncimpl,
                operation_descrs, operands, casting, order,
                axis_obj, axes_obj, keepdims);
    }
    if (errval < 0) {
        goto fail;
    }
    /*
     * Clear all variables which are not needed any further.
     * (From here on, we cannot `goto fail` any more.)
     */
    Py_XDECREF(wheremask);
    for (int i = 0; i < nop; i++) {
        Py_DECREF(signature[i]);
        Py_XDECREF(operand_DTypes[i]);
        Py_DECREF(operation_descrs[i]);
        if (i < nin) {
            Py_DECREF(operands[i]);
        }
    }
    /* The following steals the references to the outputs: */
    PyObject *result = replace_with_wrapped_result_and_return(ufunc,
            full_args, subok, operands+nin, return_scalar);
    Py_XDECREF(full_args.in);
    Py_XDECREF(full_args.out);
    npy_free_workspace(scratch_objs);
    return result;
  fail:
    Py_XDECREF(full_args.in);
    Py_XDECREF(full_args.out);
    Py_XDECREF(wheremask);
    for (int i = 0; i < ufunc->nargs; i++) {
        Py_XDECREF(operands[i]);
        Py_XDECREF(signature[i]);
        Py_XDECREF(operand_DTypes[i]);
        Py_XDECREF(operation_descrs[i]);
    }
    npy_free_workspace(scratch_objs);
    return NULL;
}
/*
* Implement vectorcallfunc which should be defined with Python 3.8+.
* In principle this could be backported, but the speed gain seems moderate
* since ufunc calls often do not have keyword arguments and always have
* a large overhead. The only user would potentially be cython probably.
*/
static PyObject *
ufunc_generic_vectorcall(PyObject *ufunc,
        PyObject *const *args, size_t len_args, PyObject *kwnames)
{
    /*
     * Unlike METH_FASTCALL, `len_args` may carry a flag bit signalling
     * that args[-1] may be (temporarily) used — strip it before forwarding.
     */
    Py_ssize_t nargs = PyVectorcall_NARGS(len_args);
    return ufunc_generic_fastcall(
            (PyUFuncObject *)ufunc, args, nargs, kwnames, NPY_FALSE);
}
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func,
                               PyUFuncGenericFunction newfunc,
                               const int *signature,
                               PyUFuncGenericFunction *oldfunc)
{
    int nargs = func->nargs;

    /* Scan the registered loops for one whose type signature matches. */
    for (int i = 0; i < func->ntypes; i++) {
        int matches = 1;
        for (int j = 0; j < nargs; j++) {
            if (signature[j] != func->types[i*nargs + j]) {
                matches = 0;
                break;
            }
        }
        if (!matches) {
            continue;
        }
        /* Found it: optionally hand back the old loop, then swap in the new. */
        if (oldfunc != NULL) {
            *oldfunc = func->functions[i];
        }
        func->functions[i] = newfunc;
        return 0;
    }
    /* No loop with this signature is registered. */
    return -1;
}
/*UFUNC_API*/
NPY_NO_EXPORT PyObject *
PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void *const *data,
                        const char *types, int ntypes,
                        int nin, int nout, int identity,
                        const char *name, const char *doc, int unused)
{
    /*
     * Convenience constructor: delegates to the signature-aware variant
     * with no generalized-ufunc signature.
     */
    return PyUFunc_FromFuncAndDataAndSignature(
            func, data, types, ntypes, nin, nout, identity,
            name, doc, unused, NULL);
}
/*UFUNC_API*/
NPY_NO_EXPORT PyObject *
PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void *const *data,
                                    const char *types, int ntypes,
                                    int nin, int nout, int identity,
                                    const char *name, const char *doc,
                                    int unused, const char *signature)
{
    /*
     * Convenience constructor: delegates to the full variant with no
     * explicit identity-value object.
     */
    return PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
            func, data, types, ntypes, nin, nout, identity,
            name, doc, unused, signature, NULL);
}
/*UFUNC_API*/
/*
 * Construct a new ufunc object from inner-loop functions, per-loop data,
 * a type table, an identity, and an optional gufunc signature / identity
 * value. Returns a new reference, or NULL with an exception set.
 */
NPY_NO_EXPORT PyObject *
PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, void *const *data,
                                     const char *types, int ntypes,
                                     int nin, int nout, int identity,
                                     const char *name, const char *doc,
                                     const int unused, const char *signature,
                                     PyObject *identity_value)
{
    PyUFuncObject *ufunc;
    if (nin + nout > NPY_MAXARGS) {
        PyErr_Format(PyExc_ValueError,
                     "Cannot construct a ufunc with more than %d operands "
                     "(requested number were: inputs = %d and outputs = %d)",
                     NPY_MAXARGS, nin, nout);
        return NULL;
    }
    ufunc = PyObject_GC_New(PyUFuncObject, &PyUFunc_Type);
    /*
     * We use GC_New here for ufunc->obj, but do not use GC_Track since
     * ufunc->obj is still NULL at the end of this function.
     * See ufunc_frompyfunc where ufunc->obj is set and GC_Track is called.
     */
    if (ufunc == NULL) {
        return NULL;
    }
    ufunc->nin = nin;
    ufunc->nout = nout;
    ufunc->nargs = nin+nout;
    ufunc->identity = identity;
    /* Only PyUFunc_IdentityValue carries an actual Python identity object. */
    if (ufunc->identity == PyUFunc_IdentityValue) {
        Py_INCREF(identity_value);
        ufunc->identity_value = identity_value;
    }
    else {
        ufunc->identity_value = NULL;
    }
    /*
     * The loop/data/type tables are borrowed from the caller and must
     * outlive the ufunc (typically they are static).
     */
    ufunc->functions = func;
    ufunc->data = data;
    ufunc->types = types;
    ufunc->ntypes = ntypes;
    ufunc->core_signature = NULL;
    ufunc->core_enabled = 0;
    ufunc->obj = NULL;
    ufunc->dict = NULL;
    ufunc->core_num_dims = NULL;
    ufunc->core_num_dim_ix = 0;
    ufunc->core_offsets = NULL;
    ufunc->core_dim_ixs = NULL;
    ufunc->core_dim_sizes = NULL;
    ufunc->core_dim_flags = NULL;
    ufunc->userloops = NULL;
    ufunc->ptr = NULL;
    ufunc->vectorcall = &ufunc_generic_vectorcall;
    ufunc->reserved1 = 0;
    ufunc->iter_flags = 0;
    /* Type resolution and inner loop selection functions */
    ufunc->type_resolver = &PyUFunc_DefaultTypeResolver;
    ufunc->process_core_dims_func = NULL;
    ufunc->op_flags = NULL;
    ufunc->_loops = NULL;
    if (nin + nout != 0) {
        ufunc->_dispatch_cache = PyArrayIdentityHash_New(nin + nout);
        if (ufunc->_dispatch_cache == NULL) {
            Py_DECREF(ufunc);
            return NULL;
        }
    }
    else {
        /*
         * Work around a test that seems to do this right now, it should not
         * be a valid ufunc at all though, so. TODO: Remove...
         */
        ufunc->_dispatch_cache = NULL;
    }
    ufunc->_loops = PyList_New(0);
    if (ufunc->_loops == NULL) {
        Py_DECREF(ufunc);
        return NULL;
    }
    if (name == NULL) {
        ufunc->name = "?";
    }
    else {
        ufunc->name = name;
    }
    ufunc->doc = doc;
    ufunc->op_flags = PyArray_malloc(sizeof(npy_uint32)*ufunc->nargs);
    if (ufunc->op_flags == NULL) {
        Py_DECREF(ufunc);
        return PyErr_NoMemory();
    }
    memset(ufunc->op_flags, 0, sizeof(npy_uint32)*ufunc->nargs);
    if (signature != NULL) {
        if (_parse_signature(ufunc, signature) != 0) {
            Py_DECREF(ufunc);
            return NULL;
        }
    }
    /* Register one wrapping loop per row of the legacy type table. */
    const char *curr_types = ufunc->types;
    for (int i = 0; i < ntypes * (nin + nout); i += nin + nout) {
        /*
         * Add all legacy wrapping loops here. This is normally not necessary,
         * but makes sense. It could also help/be needed to avoid issues with
         * ambiguous loops such as: `OO->?` and `OO->O` where in theory the
         * wrong loop could be picked if only the second one is added.
         */
        PyObject *info;
        PyArray_DTypeMeta *op_dtypes[NPY_MAXARGS];
        for (int arg = 0; arg < nin + nout; arg++) {
            op_dtypes[arg] = PyArray_DTypeFromTypeNum(curr_types[arg]);
            /* These DTypes are immortal and adding INCREFs: so borrow it */
            Py_DECREF(op_dtypes[arg]);
        }
        curr_types += nin + nout;
        info = add_and_return_legacy_wrapping_ufunc_loop(ufunc, op_dtypes, 1);
        if (info == NULL) {
            Py_DECREF(ufunc);
            return NULL;
        }
    }
    ufunc->dict = PyDict_New();
    if (ufunc->dict == NULL) {
        Py_DECREF(ufunc);
        return NULL;
    }
    /*
     * TODO: I tried adding a default promoter here (either all object for
     *       some special cases, or all homogeneous). Those are reasonable
     *       defaults, but short-cut a deprecated SciPy loop, where the
     *       homogeneous loop `ddd->d` was deprecated, but an inhomogeneous
     *       one `dld->d` should be picked.
     *       The default promoter *is* a reasonable default, but switched that
     *       behaviour.
     *       Another problem appeared due to buggy type-resolution for
     *       datetimes, this meant that `timedelta.sum(dtype="f8")` returned
     *       datetimes (and not floats or error), arguably wrong, but...
     */
    return (PyObject *)ufunc;
}
/*
* This is the first-part of the CObject structure.
*
* I don't think this will change, but if it should, then
* this needs to be fixed. The exposed C-API was insufficient
* because I needed to replace the pointer and it wouldn't
* let me with a destructor set (even though it works fine
* with the destructor).
*/
typedef struct {
    PyObject_HEAD
    void *c_obj;  /* head of a PyUFunc_Loop1d linked list stored in a capsule */
} _simple_cobj;

/* Overwrite the capsule's stored pointer in place (PyCapsule_SetPointer
 * refuses to replace the pointer once a destructor is set). */
#define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val)
/*
 * Lexicographically order two type-number signatures of length n.
 * Returns 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2,
 * where "less than" means the first differing type can be safely cast.
 */
static int
cmp_arg_types(int *arg1, int *arg2, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        int t1 = arg1[i];
        int t2 = arg2[i];
        if (PyArray_EquivTypenums(t1, t2)) {
            continue;
        }
        /* First position that differs decides the ordering. */
        return PyArray_CanCastSafely(t1, t2) ? -1 : 1;
    }
    return 0;
}
/*
 * Release a whole PyUFunc_Loop1d linked list: each node's arg_types array,
 * its (optional) held dtype references, and the node itself.
 */
static inline void
_free_loop1d_list(PyUFunc_Loop1d *data)
{
    PyUFunc_Loop1d *node = data;

    while (node != NULL) {
        PyUFunc_Loop1d *following = node->next;

        PyArray_free(node->arg_types);
        if (node->arg_dtypes != NULL) {
            /* arg_dtypes holds one reference per registered argument. */
            for (int k = 0; k < node->nargs; k++) {
                Py_DECREF(node->arg_dtypes[k]);
            }
            PyArray_free(node->arg_dtypes);
        }
        PyArray_free(node);
        node = following;
    }
}
/* Capsule destructor: free the PyUFunc_Loop1d list held by the capsule. */
static void
_loop1d_list_free(PyObject *ptr)
{
    void *raw = PyCapsule_GetPointer(ptr, NULL);
    _free_loop1d_list((PyUFunc_Loop1d *)raw);
}
/*
* This function allows the user to register a 1-d loop with an already
* created ufunc. This function is similar to RegisterLoopForType except
* that it allows a 1-d loop to be registered with PyArray_Descr objects
* instead of dtype type num values. This allows a 1-d loop to be registered
* for a structured array dtype or a custom dtype. The ufunc is called
 * whenever any of its input arguments match the user_dtype argument.
*
* ufunc - ufunc object created from call to PyUFunc_FromFuncAndData
* user_dtype - dtype that ufunc will be registered with
* function - 1-d loop function pointer
* arg_dtypes - array of dtype objects describing the ufunc operands
* data - arbitrary data pointer passed in to loop function
*
* returns 0 on success, -1 for failure
*/
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
                            PyArray_Descr *user_dtype,
                            PyUFuncGenericFunction function,
                            PyArray_Descr **arg_dtypes,
                            void *data)
{
    int i;
    int result = 0;
    int *arg_typenums;
    PyObject *key, *cobj;

    if (user_dtype == NULL) {
        PyErr_SetString(PyExc_TypeError,
                "unknown user defined struct dtype");
        return -1;
    }

    /* The userloops dict is keyed by the user dtype's type number. */
    key = PyLong_FromLong((long) user_dtype->type_num);
    if (key == NULL) {
        return -1;
    }

    arg_typenums = PyArray_malloc(ufunc->nargs * sizeof(int));
    if (arg_typenums == NULL) {
        Py_DECREF(key);
        PyErr_NoMemory();
        return -1;
    }
    /* Without explicit per-argument dtypes, every operand uses user_dtype. */
    if (arg_dtypes != NULL) {
        for (i = 0; i < ufunc->nargs; i++) {
            arg_typenums[i] = arg_dtypes[i]->type_num;
        }
    }
    else {
        for (i = 0; i < ufunc->nargs; i++) {
            arg_typenums[i] = user_dtype->type_num;
        }
    }

    /* Register by type number first, then attach descr info to the new node. */
    result = PyUFunc_RegisterLoopForType(ufunc, user_dtype->type_num,
                function, arg_typenums, data);

    if (result == 0) {
        /* Fetch the capsule (loop list) the registration above ensured. */
        cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK
        if (cobj == NULL && PyErr_Occurred()) {
            result = -1;
        }
        else if (cobj == NULL) {
            PyErr_SetString(PyExc_KeyError,
                    "userloop for user dtype not found");
            result = -1;
        }
        else {
            int cmp = 1;
            PyUFunc_Loop1d *current = PyCapsule_GetPointer(cobj, NULL);
            if (current == NULL) {
                result = -1;
                goto done;
            }
            /*
             * Walk the list for the node we just registered: same type
             * numbers (cmp == 0) and no descrs attached yet.
             */
            while (current != NULL) {
                cmp = cmp_arg_types(current->arg_types,
                    arg_typenums, ufunc->nargs);
                if (cmp >= 0 && current->arg_dtypes == NULL) {
                    break;
                }
                current = current->next;
            }
            if (cmp == 0 && current != NULL && current->arg_dtypes == NULL) {
                /* Attach owned references to the per-argument descriptors. */
                current->arg_dtypes = PyArray_malloc(ufunc->nargs *
                    sizeof(PyArray_Descr*));
                if (current->arg_dtypes == NULL) {
                    PyErr_NoMemory();
                    result = -1;
                    goto done;
                }
                else if (arg_dtypes != NULL) {
                    for (i = 0; i < ufunc->nargs; i++) {
                        current->arg_dtypes[i] = arg_dtypes[i];
                        Py_INCREF(current->arg_dtypes[i]);
                    }
                }
                else {
                    /* No explicit descrs: user_dtype for every argument. */
                    for (i = 0; i < ufunc->nargs; i++) {
                        current->arg_dtypes[i] = user_dtype;
                        Py_INCREF(current->arg_dtypes[i]);
                    }
                }
                current->nargs = ufunc->nargs;
            }
            else {
                /* A node with these types and descrs already exists. */
                PyErr_SetString(PyExc_RuntimeError,
                    "loop already registered");
                result = -1;
            }
        }
    }

done:
    PyArray_free(arg_typenums);
    Py_DECREF(key);
    return result;
}
/*UFUNC_API*/
/*
 * Register a 1-d inner loop for the given user-defined type number with an
 * already created ufunc.
 *
 * ufunc     - ufunc to register the loop with
 * usertype  - user-defined (or NPY_VOID) type number keying the loop
 * function  - 1-d loop function pointer
 * arg_types - optional array of ufunc->nargs type numbers; when NULL every
 *             operand defaults to `usertype`
 * data      - arbitrary data pointer passed through to the loop
 *
 * Returns 0 on success, -1 on failure with an exception set.
 */
NPY_NO_EXPORT int
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
                            int usertype,
                            PyUFuncGenericFunction function,
                            const int *arg_types,
                            void *data)
{
    PyArray_Descr *descr;
    PyUFunc_Loop1d *funcdata;
    PyObject *key, *cobj;
    PyArray_DTypeMeta *signature[NPY_MAXARGS];
    PyObject *signature_tuple = NULL;
    int i;
    int *newtypes=NULL;

    /* Only user-defined types (or void) may be registered this way. */
    descr=PyArray_DescrFromType(usertype);
    if ((usertype < NPY_USERDEF && usertype != NPY_VOID) || (descr==NULL)) {
        PyErr_SetString(PyExc_TypeError, "unknown user-defined type");
        return -1;
    }
    Py_DECREF(descr);

    if (ufunc->userloops == NULL) {
        ufunc->userloops = PyDict_New();
        if (ufunc->userloops == NULL) {
            /*
             * BUGFIX: previously a failed allocation here was ignored and
             * the NULL dict crashed in the lookups below.
             */
            return -1;
        }
    }
    key = PyLong_FromLong((long) usertype);
    if (key == NULL) {
        return -1;
    }
    funcdata = PyArray_malloc(sizeof(PyUFunc_Loop1d));
    if (funcdata == NULL) {
        goto fail;
    }
    newtypes = PyArray_malloc(sizeof(int)*ufunc->nargs);
    if (newtypes == NULL) {
        goto fail;
    }
    /* Build the type-number signature; default all args to `usertype`. */
    if (arg_types != NULL) {
        for (i = 0; i < ufunc->nargs; i++) {
            newtypes[i] = arg_types[i];
            signature[i] = PyArray_DTypeFromTypeNum(arg_types[i]);
            Py_DECREF(signature[i]);  /* DType can't be deleted... */
        }
    }
    else {
        for (i = 0; i < ufunc->nargs; i++) {
            newtypes[i] = usertype;
            signature[i] = PyArray_DTypeFromTypeNum(usertype);
            Py_DECREF(signature[i]);  /* DType can't be deleted... */
        }
    }

    signature_tuple = PyArray_TupleFromItems(
            ufunc->nargs, (PyObject **)signature, 0);
    if (signature_tuple == NULL) {
        goto fail;
    }
    /*
     * We add the loop to the list of all loops and promoters. If the
     * equivalent loop was already added, skip this.
     * Note that even then the ufunc is still modified: The legacy ArrayMethod
     * already looks up the inner-loop from the ufunc (and this is replaced
     * below!).
     * If the existing one is not a legacy ArrayMethod, we raise currently:
     * A new-style loop should not be replaced by an old-style one.
     */
    int add_new_loop = 1;
    for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) {
        PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK
        PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0);

        int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ);
        if (cmp < 0) {
            goto fail;
        }
        if (!cmp) {
            continue;
        }
        PyObject *registered = PyTuple_GET_ITEM(item, 1);
        if (!PyObject_TypeCheck(registered, &PyArrayMethod_Type) || (
                (PyArrayMethodObject *)registered)->get_strided_loop !=
                        &get_wrapped_legacy_ufunc_loop) {
            PyErr_Format(PyExc_TypeError,
                    "A non-compatible loop was already registered for "
                    "ufunc %s and DTypes %S.",
                    ufunc_get_name_cstr(ufunc), signature_tuple);
            goto fail;
        }
        /* The loop was already added */
        add_new_loop = 0;
        break;
    }
    if (add_new_loop) {
        PyObject *info = add_and_return_legacy_wrapping_ufunc_loop(
                ufunc, signature, 0);
        if (info == NULL) {
            goto fail;
        }
    }
    /* Clearing sets it to NULL for the error paths */
    Py_CLEAR(signature_tuple);

    funcdata->func = function;
    funcdata->arg_types = newtypes;
    funcdata->data = data;
    funcdata->next = NULL;
    funcdata->arg_dtypes = NULL;
    funcdata->nargs = 0;

    /* Get entry for this user-defined type*/
    cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK
    if (cobj == NULL && PyErr_Occurred()) {
        goto fail;
    }
    /* If it's not there, then make one and return. */
    else if (cobj == NULL) {
        cobj = PyCapsule_New((void *)funcdata, NULL, _loop1d_list_free);
        if (cobj == NULL) {
            goto fail;
        }
        PyDict_SetItem(ufunc->userloops, key, cobj);
        Py_DECREF(cobj);
        Py_DECREF(key);
        return 0;
    }
    else {
        PyUFunc_Loop1d *current, *prev = NULL;
        int cmp = 1;
        /*
         * There is already at least 1 loop. Place this one in
         * lexicographic order. If the signature of the next one
         * is exactly like this one, then just replace.
         * Otherwise insert.
         */
        current = PyCapsule_GetPointer(cobj, NULL);
        if (current == NULL) {
            goto fail;
        }
        while (current != NULL) {
            cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs);
            if (cmp >= 0) {
                break;
            }
            prev = current;
            current = current->next;
        }
        if (cmp == 0) {
            /* just replace it with new function */
            current->func = function;
            current->data = data;
            PyArray_free(newtypes);
            PyArray_free(funcdata);
        }
        else {
            /*
             * insert it before the current one by hacking the internals
             * of cobject to replace the function pointer --- can't use
             * CObject API because destructor is set.
             */
            funcdata->next = current;
            if (prev == NULL) {
                /* place this at front */
                _SETCPTR(cobj, funcdata);
            }
            else {
                prev->next = funcdata;
            }
        }
    }
    Py_DECREF(key);
    return 0;

 fail:
    Py_DECREF(key);
    Py_XDECREF(signature_tuple);
    PyArray_free(funcdata);
    PyArray_free(newtypes);
    if (!PyErr_Occurred()) PyErr_NoMemory();
    return -1;
}
#undef _SETCPTR
/* tp_dealloc: release everything a ufunc owns. */
static void
ufunc_dealloc(PyUFuncObject *ufunc)
{
    /* Untrack first so the GC never inspects a half-freed object. */
    PyObject_GC_UnTrack((PyObject *)ufunc);
    PyArray_free(ufunc->core_num_dims);
    PyArray_free(ufunc->core_dim_ixs);
    PyArray_free(ufunc->core_dim_sizes);
    PyArray_free(ufunc->core_dim_flags);
    PyArray_free(ufunc->core_offsets);
    PyArray_free(ufunc->core_signature);
    PyArray_free(ufunc->ptr);
    PyArray_free(ufunc->op_flags);
    Py_XDECREF(ufunc->userloops);
    /* identity_value only holds a reference for value identities. */
    if (ufunc->identity == PyUFunc_IdentityValue) {
        Py_DECREF(ufunc->identity_value);
    }
    Py_XDECREF(ufunc->obj);
    Py_XDECREF(ufunc->dict);
    Py_XDECREF(ufunc->_loops);
    if (ufunc->_dispatch_cache != NULL) {
        PyArrayIdentityHash_Dealloc(ufunc->_dispatch_cache);
    }
    PyObject_GC_Del(ufunc);
}
/* tp_repr: render a ufunc as "<ufunc 'name'>". */
static PyObject *
ufunc_repr(PyUFuncObject *ufunc)
{
    const char *name = ufunc->name;
    return PyUnicode_FromFormat("<ufunc '%s'>", name);
}
/* tp_traverse: visit the references that may participate in cycles. */
static int
ufunc_traverse(PyUFuncObject *self, visitproc visit, void *arg)
{
    /* Py_VISIT returns from this function on a non-zero visit result. */
    Py_VISIT(self->obj);
    /* identity_value only holds a reference for value identities. */
    if (self->identity == PyUFunc_IdentityValue) {
        Py_VISIT(self->identity_value);
    }
    Py_VISIT(self->dict);
    return 0;
}
/******************************************************************************
*** UFUNC METHODS ***
*****************************************************************************/
/*
* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b)
* where a has b.ndim NewAxis terms appended.
*
* The result has dimensions a.ndim + b.ndim
*/
/*
 * Implement `ufunc.outer(a, b)` for plain binary ufuncs. Validates the
 * ufunc and argument count, then dispatches to the generic fastcall path
 * with outer=NPY_TRUE.
 */
static PyObject *
ufunc_outer(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    /* Generalized (core-signature) ufuncs cannot take outer products. */
    if (ufunc->core_enabled) {
        PyErr_Format(PyExc_TypeError,
                "method outer is not allowed in ufunc with non-trivial"
                " signature");
        return NULL;
    }
    /* The reshape trick only makes sense with exactly two inputs. */
    if (ufunc->nin != 2) {
        PyErr_SetString(PyExc_ValueError,
                "outer product only supported "
                "for binary functions");
        return NULL;
    }
    if (len_args != 2) {
        PyErr_SetString(PyExc_TypeError, "exactly two arguments expected");
        return NULL;
    }

    return ufunc_generic_fastcall(ufunc, args, len_args, kwnames, NPY_TRUE);
}
/*
 * Prepare (a, b) for `ufunc.outer`: convert both to arrays and reshape the
 * first to append b.ndim size-1 axes, so ordinary broadcasting yields the
 * outer product. Returns a new `(a_reshaped, b)` tuple, or NULL with an
 * exception set. np.matrix inputs are rejected (their special handling
 * was removed).
 */
static PyObject *
prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc)
{
    PyArrayObject *ap1 = NULL;
    PyArrayObject *ap2 = NULL;
    PyObject *tmp;

    if (npy_cache_import_runtime("numpy", "matrix",
                                 &npy_runtime_imports.numpy_matrix) == -1) {
        /*
         * BUGFIX: a failed import was previously ignored, passing NULL to
         * PyObject_IsInstance below with the import error still set.
         */
        return NULL;
    }

    const char *matrix_deprecation_msg = (
            "%s.outer() was passed a numpy matrix as %s argument. "
            "Special handling of matrix is removed. Convert to a "
            "ndarray via 'matrix.A' ");

    tmp = PyTuple_GET_ITEM(args, 0);
    if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) {
        PyErr_Format(PyExc_TypeError,
                matrix_deprecation_msg, ufunc->name, "first");
        return NULL;
    }
    ap1 = (PyArrayObject *) PyArray_FROM_O(tmp);
    if (ap1 == NULL) {
        return NULL;
    }

    tmp = PyTuple_GET_ITEM(args, 1);
    if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) {
        PyErr_Format(PyExc_TypeError,
                matrix_deprecation_msg, ufunc->name, "second");
        /* BUGFIX: previously returned NULL directly, leaking `ap1`. */
        goto fail;
    }
    ap2 = (PyArrayObject *) PyArray_FROM_O(tmp);
    if (ap2 == NULL) {
        goto fail;
    }

    /* New shape for ap1: its own dims followed by ap2->ndim ones. */
    PyArray_Dims newdims;
    npy_intp newshape[NPY_MAXDIMS];
    newdims.len = PyArray_NDIM(ap1) + PyArray_NDIM(ap2);
    newdims.ptr = newshape;

    if (newdims.len > NPY_MAXDIMS) {
        PyErr_Format(PyExc_ValueError,
                "maximum supported dimension for an ndarray is %d, but "
                "`%s.outer()` result would have %d.",
                NPY_MAXDIMS, ufunc->name, newdims.len);
        goto fail;
    }
    memcpy(newshape, PyArray_DIMS(ap1), PyArray_NDIM(ap1) * sizeof(npy_intp));
    for (int i = PyArray_NDIM(ap1); i < newdims.len; i++) {
        newshape[i] = 1;
    }

    PyArrayObject *ap_new;
    ap_new = (PyArrayObject *)PyArray_Newshape(ap1, &newdims, NPY_CORDER);
    if (ap_new == NULL) {
        goto fail;
    }
    /* A reshape that does not round-trip indicates a misbehaving subclass. */
    if (PyArray_NDIM(ap_new) != newdims.len ||
            !PyArray_CompareLists(PyArray_DIMS(ap_new), newshape, newdims.len)) {
        PyErr_Format(PyExc_TypeError,
                "%s.outer() called with ndarray-subclass of type '%s' "
                "which modified its shape after a reshape. `outer()` relies "
                "on reshaping the inputs and is for example not supported for "
                "the 'np.matrix' class (the usage of matrix is generally "
                "discouraged). "
                "To work around this issue, please convert the inputs to "
                "numpy arrays.",
                ufunc->name, Py_TYPE(ap_new)->tp_name);
        Py_DECREF(ap_new);
        goto fail;
    }

    Py_DECREF(ap1);
    /* "NN" steals both references on success. */
    return Py_BuildValue("(NN)", ap_new, ap2);

 fail:
    Py_XDECREF(ap1);
    Py_XDECREF(ap2);
    return NULL;
}
/* Implement `ufunc.reduce(...)` via the shared reduction dispatcher. */
static PyObject *
ufunc_reduce(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    return PyUFunc_GenericReduction(ufunc, args, len_args, kwnames,
                                    UFUNC_REDUCE);
}
/* Implement `ufunc.accumulate(...)` via the shared reduction dispatcher. */
static PyObject *
ufunc_accumulate(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    return PyUFunc_GenericReduction(ufunc, args, len_args, kwnames,
                                    UFUNC_ACCUMULATE);
}
/* Implement `ufunc.reduceat(...)` via the shared reduction dispatcher. */
static PyObject *
ufunc_reduceat(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    return PyUFunc_GenericReduction(ufunc, args, len_args, kwnames,
                                    UFUNC_REDUCEAT);
}
/* Helper for ufunc_at, below */
/*
 * Wrap the single element at `data` of `op_array` in a fresh 1-element,
 * writeable ndarray that shares op_array's descr. Returns NULL on failure.
 */
static inline PyArrayObject *
new_array_op(PyArrayObject *op_array, char *data)
{
    npy_intp shape[1] = {1};
    PyArray_Descr *descr = PyArray_DESCR(op_array);

    Py_INCREF(descr);  /* PyArray_NewFromDescr steals this reference */
    return (PyArrayObject *)PyArray_NewFromDescr(
            &PyArray_Type, descr, 1, shape, NULL, data,
            NPY_ARRAY_WRITEABLE, NULL);
}
/*
* Use an indexed loop to do the work
* Returns 0 if successful
*/
/*
 * Run ufunc.at via the method's contiguous indexed loop (the fastest path).
 * Requires 1-d aligned operands without casting; see caller for the checks.
 * Returns 0 on success, non-zero on error.
 */
static int
trivial_at_loop(PyArrayMethodObject *ufuncimpl, NPY_ARRAYMETHOD_FLAGS flags,
                    PyArrayMapIterObject *iter,
                    PyArrayObject *op1_array, PyArrayObject *op2_array,
                    PyArrayMethod_Context *context)
{
    int buffersize=0, errormask = 0;
    int res;
    char *args[3];
    npy_intp steps[4];
    /* args: target base, index array, optional second operand. */
    args[0] = (char *) iter->baseoffset;
    steps[0] = iter->fancy_strides[0];
    if (ufuncimpl->nin == 1) {
        args[2] = NULL;
        steps[2] = 0;
    } else {
        args[2] = (char *)PyArray_DATA(op2_array);
        /* Broadcast a 0-d or single-element second operand via stride 0. */
        if (PyArray_NDIM(op2_array) == 0
            || PyArray_DIM(op2_array, 0) <= 1) {
            steps[2] = 0;
        } else {
            steps[2] = PyArray_STRIDE(op2_array, 0);
        }
    }

    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        npy_clear_floatstatus_barrier((char *)context);
    }

    do {
        npy_intp *inner_size = NpyIter_GetInnerLoopSizePtr(iter->outer);
        npy_intp * indxP = (npy_intp *)iter->outer_ptrs[0];
        args[1] = (char *)indxP;
        steps[1] = iter->outer_strides[0];
        /*
         * The value of iter->fancy_dims[0] is added to negative indexes
         * inside the inner loop
         */
        steps[3] = iter->fancy_dims[0];

        res = ufuncimpl->contiguous_indexed_loop(
                context, args, inner_size, steps, NULL);

        /* Advance the second operand past the indices just consumed. */
        if (args[2] != NULL) {
            args[2] += (*inner_size) * steps[2];
        }
    } while (res == 0 && iter->outer_next(iter->outer));

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        const char * ufunc_name =
                        ufunc_get_name_cstr((PyUFuncObject *)context->caller);
        if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
            return -1;
        }
        res = _check_ufunc_fperr(errormask, ufunc_name);
    }
    return res;
}
/*
 * ufunc.at fast path: operands are aligned and need no casting, so the
 * strided loop is called directly on one element per index at a time.
 * Returns 0 on success, non-zero on error.
 */
static int
ufunc_at__fast_iter(PyUFuncObject *ufunc, NPY_ARRAYMETHOD_FLAGS flags,
                    PyArrayMapIterObject *iter, PyArrayIterObject *iter2,
                    PyArrayObject *op1_array, PyArrayObject *op2_array,
                    PyArrayMethod_StridedLoop *strided_loop,
                    PyArrayMethod_Context *context,
                    NpyAuxData *auxdata
                    )
{
    int buffersize;
    int errormask = 0;
    int res = 0;
    NPY_BEGIN_THREADS_DEF;

    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        return -1;
    }
    int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Start with the floating-point exception flags cleared */
        npy_clear_floatstatus_barrier((char*)&iter);
    }

    /* Only release the GIL when the inner loop does not need Python. */
    if (!needs_api) {
        NPY_BEGIN_THREADS;
    }

    npy_intp strides[3] = {0, 0, 0};
    /*
     * Iterate over first and second operands and call ufunc
     * for each pair of inputs
     */
    for (npy_intp i = iter->size; i > 0; i--)
    {
        char *dataptr[3];
        /* one element at a time, no stride required but read by innerloop */
        npy_intp count = 1;

        /*
         * Set up data pointers for either one or two input operands.
         * The output data pointer points to the first operand data.
         */
        dataptr[0] = iter->dataptr;
        if (iter2 != NULL) {
            dataptr[1] = PyArray_ITER_DATA(iter2);
            dataptr[2] = iter->dataptr;
        }
        else {
            dataptr[1] = iter->dataptr;
            dataptr[2] = NULL;
        }

        res = strided_loop(context, dataptr, &count, strides, auxdata);

        if (res != 0) {
            break;
        }

        /* Advance both iterators in lock-step. */
        PyArray_MapIterNext(iter);
        if (iter2 != NULL) {
            PyArray_ITER_NEXT(iter2);
        }
    }

    NPY_END_THREADS;

    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        res = _check_ufunc_fperr(errormask, "at");
    }
    return res;
}
/*
 * ufunc.at slow path: operands need casting and/or are unaligned, so a
 * buffered NpyIter handles the per-element copies around the strided loop.
 * Returns 0 on success, non-zero on error.
 */
static int
ufunc_at__slow_iter(PyUFuncObject *ufunc, NPY_ARRAYMETHOD_FLAGS flags,
                    PyArrayMapIterObject *iter, PyArrayIterObject *iter2,
                    PyArrayObject *op1_array, PyArrayObject *op2_array,
                    PyArray_Descr *operation_descrs[3],
                    PyArrayMethod_StridedLoop *strided_loop,
                    PyArrayMethod_Context *context,
                    NpyAuxData *auxdata
                    )
{
    NpyIter *iter_buffer = NULL;
    PyArrayObject *array_operands[3] = {NULL, NULL, NULL};
    int buffersize;
    int errormask = 0;
    int res = 0;
    int nop = 0;
    NpyIter_IterNextFunc *iternext;
    char * err_msg = NULL;
    NPY_BEGIN_THREADS_DEF;

    if (_get_bufsize_errmask(&buffersize, &errormask) < 0) {
        return -1;
    }
    /* Wrap single elements as 1-element arrays for the NpyIter below. */
    array_operands[0] = new_array_op(op1_array, iter->dataptr);
    if (iter2 != NULL) {
        array_operands[1] = new_array_op(op2_array, PyArray_ITER_DATA(iter2));
        array_operands[2] = new_array_op(op1_array, iter->dataptr);
        nop = 3;
    }
    else {
        array_operands[1] = new_array_op(op1_array, iter->dataptr);
        array_operands[2] = NULL;
        nop = 2;
    }
    /* Set up the flags */
    npy_uint32 op_flags[3];
    op_flags[0] = NPY_ITER_READONLY|
                  NPY_ITER_ALIGNED;

    if (iter2 != NULL) {
        op_flags[1] = NPY_ITER_READONLY|
                      NPY_ITER_ALIGNED;
        op_flags[2] = NPY_ITER_WRITEONLY|
                      NPY_ITER_ALIGNED|
                      NPY_ITER_ALLOCATE|
                      NPY_ITER_NO_BROADCAST|
                      NPY_ITER_NO_SUBTYPE;
    }
    else {
        op_flags[1] = NPY_ITER_WRITEONLY|
                      NPY_ITER_ALIGNED|
                      NPY_ITER_ALLOCATE|
                      NPY_ITER_NO_BROADCAST|
                      NPY_ITER_NO_SUBTYPE;
    }
    /*
     * Create NpyIter object to "iterate" over single element of each input
     * operand. This is an easy way to reuse the NpyIter logic for dealing
     * with certain cases like casting operands to correct dtype. On each
     * iteration over the MapIterArray object created above, we'll take the
     * current data pointers from that and reset this NpyIter object using
     * those data pointers, and then trigger a buffer copy. The buffer data
     * pointers from the NpyIter object will then be passed to the inner loop
     * function.
     */
    iter_buffer = NpyIter_AdvancedNew(nop, array_operands,
                        NPY_ITER_EXTERNAL_LOOP|
                        NPY_ITER_REFS_OK|
                        NPY_ITER_ZEROSIZE_OK|
                        NPY_ITER_BUFFERED|
                        NPY_ITER_GROWINNER|
                        NPY_ITER_DELAY_BUFALLOC,
                        NPY_KEEPORDER, NPY_UNSAFE_CASTING,
                        op_flags, operation_descrs,
                        -1, NULL, NULL, buffersize);

    if (iter_buffer == NULL) {
        /* will fail only on memory allocation errors */
        for (int i = 0; i < 3; i++) {
            Py_XDECREF(array_operands[i]);
        }
        return -1;
    }

    iternext = NpyIter_GetIterNext(iter_buffer, NULL);
    if (iternext == NULL) {
        /* can not really happen, iter_buffer creation is tightly controlled */
        NpyIter_Deallocate(iter_buffer);
        for (int i = 0; i < 3; i++) {
            Py_XDECREF(array_operands[i]);
        }
        return -1;
    }

    /* The iterator's transfer functions may also require the Python API. */
    flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter_buffer));
    int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
    if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* Start with the floating-point exception flags cleared */
        npy_clear_floatstatus_barrier((char*)&iter);
    }

    npy_intp strides[3] = {0, 0, 0};
    if (!needs_api) {
        NPY_BEGIN_THREADS;
    }

    /*
     * Iterate over first and second operands and call ufunc
     * for each pair of inputs
     */
    for (npy_intp i = iter->size; i > 0; i--)
    {
        char *dataptr[3];
        char **buffer_dataptr;
        /* one element at a time, no stride required but read by innerloop */
        npy_intp count = 1;

        /*
         * Set up data pointers for either one or two input operands.
         * The output data pointer points to the first operand data.
         */
        dataptr[0] = iter->dataptr;
        if (iter2 != NULL) {
            dataptr[1] = PyArray_ITER_DATA(iter2);
            dataptr[2] = iter->dataptr;
        }
        else {
            dataptr[1] = iter->dataptr;
            dataptr[2] = NULL;
        }

        /* Reset NpyIter data pointers which will trigger a buffer copy */
        NpyIter_ResetBasePointers(iter_buffer, dataptr, &err_msg);
        if (err_msg) {
            res = -1;
            break;
        }

        buffer_dataptr = NpyIter_GetDataPtrArray(iter_buffer);

        res = strided_loop(context, buffer_dataptr, &count, strides, auxdata);
        if (res != 0) {
            break;
        }

        /*
         * Call to iternext triggers copy from buffer back to output array
         * after innerloop puts result in buffer.
         */
        iternext(iter_buffer);

        PyArray_MapIterNext(iter);
        if (iter2 != NULL) {
            PyArray_ITER_NEXT(iter2);
        }
    }

    NPY_END_THREADS;

    /* err_msg is only set inside the loop without the GIL; raise it now. */
    if (res != 0 && err_msg) {
        PyErr_SetString(PyExc_ValueError, err_msg);
    }
    if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
        /* NOTE: We could check float errors even when `res < 0` */
        res = _check_ufunc_fperr(errormask, "at");
    }
    NpyIter_Deallocate(iter_buffer);
    for (int i = 0; i < 3; i++) {
        Py_XDECREF(array_operands[i]);
    }
    return res;
}
/*
* Call ufunc only on selected array items and store result in first operand.
* For add ufunc, method call is equivalent to op1[idx] += op2 with no
* buffering of the first operand.
* Arguments:
* op1 - First operand to ufunc
* idx - Indices that are applied to first operand. Equivalent to op1[idx].
* op2 - Second operand to ufunc (if needed). Must be able to broadcast
* over first operand.
*/
/*
 * Python-level `ufunc.at(op1, idx[, op2])`: apply the ufunc in-place at the
 * indexed positions of op1, unbuffered. Validates the ufunc/arguments,
 * resolves dtypes and the inner loop, then dispatches to one of three
 * execution paths (trivial indexed loop, fast iter, buffered slow iter).
 * Returns None on success, NULL with an exception on failure.
 */
static PyObject *
ufunc_at(PyUFuncObject *ufunc, PyObject *args)
{
    PyObject *op1 = NULL;
    PyObject *idx = NULL;
    PyObject *op2 = NULL;
    PyArrayObject *op1_array = NULL;
    PyArrayObject *op2_array = NULL;
    PyArrayMapIterObject *iter = NULL;
    PyArrayIterObject *iter2 = NULL;
    PyArray_Descr *operation_descrs[3] = {NULL, NULL, NULL};

    int nop;

    /* override vars */
    int errval;
    PyObject *override = NULL;
    int res = -1;   /* start with fail condition so "goto fail" will error */

    PyArrayMethod_StridedLoop *strided_loop;
    NpyAuxData *auxdata = NULL;

    /* `at` only supports plain (non-generalized) ufuncs. */
    if (ufunc->core_enabled) {
        PyErr_Format(PyExc_TypeError,
            "%s.at does not support ufunc with non-trivial signature: %s has signature %s.",
            ufunc->name, ufunc->name, ufunc->core_signature);
        return NULL;
    }

    if (ufunc->nin > 2) {
        PyErr_SetString(PyExc_ValueError,
            "Only unary and binary ufuncs supported at this time");
        return NULL;
    }

    if (ufunc->nout != 1) {
        PyErr_SetString(PyExc_ValueError,
            "Only single output ufuncs supported at this time");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "OO|O:at", &op1, &idx, &op2)) {
        return NULL;
    }

    /* op2 presence must match the ufunc's arity. */
    if (ufunc->nin == 2 && op2 == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "second operand needed for ufunc");
        return NULL;
    }

    if (ufunc->nin == 1 && op2 != NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "second operand provided when ufunc is unary");
        return NULL;
    }
    /* Give __array_ufunc__ overrides a chance before doing any work. */
    errval = PyUFunc_CheckOverride(ufunc, "at",
            args, NULL, NULL, NULL, 0, NULL, &override);

    if (errval) {
        return NULL;
    }
    else if (override) {
        return override;
    }

    if (!PyArray_Check(op1)) {
        PyErr_SetString(PyExc_TypeError,
                        "first operand must be array");
        return NULL;
    }

    op1_array = (PyArrayObject *)op1;

    /* Create second operand from number array if needed. */
    if (op2 == NULL) {
        nop = 2;
    }
    else {
        nop = 3;
        op2_array = (PyArrayObject *)PyArray_FromAny(op2, NULL,
                                0, 0, 0, NULL);
        if (op2_array == NULL) {
            goto fail;
        }
    }

    PyArrayMethodObject *ufuncimpl = NULL;
    {
        /* Do all the dtype handling and find the correct ufuncimpl */

        PyArrayObject *tmp_operands[3] = {NULL, NULL, NULL};
        PyArray_DTypeMeta *signature[3] = {NULL, NULL, NULL};
        PyArray_DTypeMeta *operand_DTypes[3] = {NULL, NULL, NULL};
        /*
         * Create dtypes array for either one or two input operands.
         * Compare to the logic in `convert_ufunc_arguments`.
         * TODO: It may be good to review some of this behaviour, since the
         *       operand array is special (it is written to) similar to
         *       reductions. Using unsafe-casting as done here, is likely
         *       not desirable.
         */
        tmp_operands[0] = op1_array;
        operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array));
        Py_INCREF(operand_DTypes[0]);
        int force_legacy_promotion = 0;

        if (op2_array != NULL) {
            tmp_operands[1] = op2_array;
            operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array));
            Py_INCREF(operand_DTypes[1]);
            /* Output operand mirrors the first input. */
            tmp_operands[2] = tmp_operands[0];
            operand_DTypes[2] = operand_DTypes[0];
            Py_INCREF(operand_DTypes[2]);

            if ((PyArray_NDIM(op1_array) == 0)
                    != (PyArray_NDIM(op2_array) == 0)) {
                /* both are legacy and only one is 0-D: force legacy */
                force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL);
            }
        }
        else {
            /* Unary: the single input doubles as the output operand. */
            tmp_operands[1] = tmp_operands[0];
            operand_DTypes[1] = operand_DTypes[0];
            Py_INCREF(operand_DTypes[1]);
            tmp_operands[2] = NULL;
        }

        ufuncimpl = promote_and_get_ufuncimpl(ufunc, tmp_operands, signature,
                        operand_DTypes, force_legacy_promotion,
                        NPY_FALSE, NPY_FALSE);
        if (ufuncimpl == NULL) {
            for (int i = 0; i < 3; i++) {
                Py_XDECREF(signature[i]);
                Py_XDECREF(operand_DTypes[i]);
            }
            goto fail;
        }

        /* Find the correct operation_descrs for the operation */
        int resolve_result = resolve_descriptors(nop, ufunc, ufuncimpl,
                tmp_operands, operation_descrs, signature, operand_DTypes, NULL, NPY_UNSAFE_CASTING);
        for (int i = 0; i < 3; i++) {
            Py_XDECREF(signature[i]);
            Py_XDECREF(operand_DTypes[i]);
        }
        if (resolve_result < 0) {
            goto fail;
        }
    }

    iter = (PyArrayMapIterObject *)PyArray_MapIterArrayCopyIfOverlap(
        op1_array, idx, 1, op2_array);
    if (iter == NULL) {
        goto fail;
    }
    op1_array = iter->array;  /* May be updateifcopied on overlap */

    if (op2_array != NULL) {
        /*
         * May need to swap axes so that second operand is
         * iterated over correctly
         */
        if ((iter->subspace != NULL) && (iter->consec)) {
            PyArray_MapIterSwapAxes(iter, &op2_array, 0);
            if (op2_array == NULL) {
                /* only on memory allocation failure */
                goto fail;
            }
        }

        /*
         * Create array iter object for second operand that
         * "matches" the map iter object for the first operand.
         * Then we can just iterate over the first and second
         * operands at the same time and not have to worry about
         * picking the correct elements from each operand to apply
         * the ufunc to.
         */
        if ((iter2 = (PyArrayIterObject *)\
             PyArray_BroadcastToShape((PyObject *)op2_array,
                                        iter->dimensions, iter->nd))==NULL) {
            goto fail;
        }
    }

    PyArrayMethod_Context context;
    NPY_context_init(&context, operation_descrs);
    context.caller = (PyObject *)ufunc;
    context.method = ufuncimpl;

    /* Use contiguous strides; if there is such a loop it may be faster */
    npy_intp strides[3] = {
            operation_descrs[0]->elsize, operation_descrs[1]->elsize, 0};
    if (nop == 3) {
        strides[2] = operation_descrs[2]->elsize;
    }

    NPY_ARRAYMETHOD_FLAGS flags;
    if (ufuncimpl->get_strided_loop(&context, 1, 0, strides,
            &strided_loop, &auxdata, &flags) < 0) {
        goto fail;
    }
    /* The fast paths require aligned operands and no casting at all. */
    int fast_path = 1;
    /* check no casting, alignment */
    if (PyArray_DESCR(op1_array) != operation_descrs[0]) {
        fast_path = 0;
    }
    if (PyArray_DESCR(op1_array) != operation_descrs[nop - 1]) {
        /* output casting */
        fast_path = 0;
    }
    if (!PyArray_ISALIGNED(op1_array)) {
        fast_path = 0;
    }
    if (nop >2) {
        if (PyArray_DESCR(op2_array) != operation_descrs[1]) {
            fast_path = 0;
        }
        if (!PyArray_ISALIGNED(op2_array)) {
            fast_path = 0;
        }
    }

    if (fast_path == 1) {
        /*
         * Try to use trivial loop (1d, no casting, aligned) if
         * - the matching info has a indexed loop
         * - idx must be exactly one integer index array
         * - all operands are 1d
         * A future enhancement could loosen the restriction on 1d operands
         * by adding an iteration loop inside trivial_at_loop
         */
        if ((ufuncimpl->contiguous_indexed_loop != NULL) &&
                (PyArray_NDIM(op1_array) == 1)  &&
                (op2_array == NULL || PyArray_NDIM(op2_array) <= 1) &&
                (iter->subspace_iter == NULL) && (iter->num_fancy == 1)) {
            res = trivial_at_loop(ufuncimpl, flags, iter, op1_array,
                        op2_array, &context);

        }
        else {
            /* Couldn't use the fastest path, use the faster path */
            res = ufunc_at__fast_iter(ufunc, flags, iter, iter2, op1_array,
                        op2_array, strided_loop, &context, auxdata);
        }
    } else {
        res = ufunc_at__slow_iter(ufunc, flags, iter, iter2, op1_array,
                        op2_array, operation_descrs, strided_loop, &context,
                        auxdata);
    }

fail:
    NPY_AUXDATA_FREE(auxdata);

    Py_XDECREF(op2_array);
    Py_XDECREF(iter2);
    for (int i = 0; i < nop; i++) {
        Py_XDECREF(operation_descrs[i]);
    }

    /*
     * An error should only be possible if needs_api is true or `res != 0`,
     * but this is not strictly correct for old-style ufuncs
     * (e.g. `power` released the GIL but manually set an Exception).
     */
    if (res != 0 || PyErr_Occurred()) {
        /* iter_buffer has already been deallocated, don't use NpyIter_Dealloc */
        if (PyArray_FLAGS(op1_array) & NPY_ARRAY_WRITEBACKIFCOPY) {
            /* Abandon the partial result held in the writeback copy. */
            PyArray_DiscardWritebackIfCopy(op1_array);
        }
        // iter might own the last reference to op1_array,
        // so it must be decref'd second
        Py_XDECREF(iter);
        return NULL;
    }
    else {
        Py_XDECREF(iter);
        Py_RETURN_NONE;
    }
}
/* Payload of the "numpy_1.24_ufunc_call_info" capsule: a resolved inner
 * loop plus the context needed to call it. */
typedef struct {
    PyArrayMethod_StridedLoop *strided_loop;  /* resolved inner-loop function */
    PyArrayMethod_Context *context;           /* points at _full_context below */
    NpyAuxData *auxdata;                      /* loop auxiliary data (owned) */
    /* Should move to flags, but lets keep it bools for now: */
    npy_bool requires_pyapi;
    npy_bool no_floatingpoint_errors;
    PyArrayMethod_Context _full_context;      /* inline context storage */
    PyArray_Descr *_descrs[];                 /* trailing descriptor storage */
} ufunc_call_info;
/*
 * Capsule destructor for "numpy_1.24_ufunc_call_info": drop the references
 * held through the stored context (descriptors, caller, method), release
 * the auxdata, then free the allocation itself.
 */
void
free_ufunc_call_info(PyObject *self)
{
    ufunc_call_info *info = PyCapsule_GetPointer(
            self, "numpy_1.24_ufunc_call_info");

    PyArrayMethod_Context *ctx = info->context;
    int ntotal = ctx->method->nin + ctx->method->nout;

    for (int k = 0; k < ntotal; k++) {
        Py_DECREF(ctx->descriptors[k]);
    }
    Py_DECREF(ctx->caller);
    Py_DECREF(ctx->method);

    NPY_AUXDATA_FREE(info->auxdata);
    PyObject_Free(info);
}
/*
 * Python entry-point to ufunc promotion and dtype/descr resolution.
 *
 * This function does most of the work required to execute ufunc without
 * actually executing it.
 * This can be very useful for downstream libraries that reimplement NumPy
 * functionality, such as Numba or Dask.
 *
 * When `return_context` is true, the result is a 2-tuple
 * `(dtype_tuple, call_info_capsule)`; otherwise just the dtype tuple.
 */
static PyObject *
py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    NPY_PREPARE_ARGPARSER;
    PyObject *descrs_tuple;
    PyObject *signature_obj = NULL;
    NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
    npy_bool reduction = NPY_FALSE;
    if (npy_parse_arguments("resolve_dtypes", args, len_args, kwnames,
            "", NULL, &descrs_tuple,
            "$signature", NULL, &signature_obj,
            "$casting", &PyArray_CastingConverter, &casting,
            "$reduction", &PyArray_BoolConverter, &reduction,
            NULL, NULL, NULL) < 0) {
        return NULL;
    }
    /* Reductions are only defined for binary ufuncs with one output. */
    if (reduction && (ufunc->nin != 2 || ufunc->nout != 1)) {
        PyErr_SetString(PyExc_ValueError,
                "ufunc is not compatible with reduction operations.");
        return NULL;
    }
    /*
     * Legacy type resolvers expect NumPy arrays as input.  Until NEP 50 is
     * adopted, it is most convenient to ensure that we have an "array" object
     * before calling the type promotion.  Eventually, this hack may be moved
     * into the legacy type resolution code itself (probably after NumPy stops
     * using legacy type resolution itself for the most part).
     *
     * We make the pretty safe assumptions here that:
     * - Nobody will actually do anything with the array objects besides
     *   checking the descriptor or calling CanCast.
     * - No type resolver will cause weird paths that mess with our promotion
     *   state (or mind us messing with it).
     */
    PyObject *result = NULL;
    PyObject *result_dtype_tuple = NULL;
    /* All of these arrays hold owned references, released at `finish`. */
    PyArrayObject *dummy_arrays[NPY_MAXARGS] = {NULL};
    PyArray_DTypeMeta *DTypes[NPY_MAXARGS] = {NULL};
    PyArray_DTypeMeta *signature[NPY_MAXARGS] = {NULL};
    PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL};
    npy_bool promoting_pyscalars = NPY_FALSE;
    if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) {
        goto finish;
    }
    if (!PyTuple_CheckExact(descrs_tuple)
            || PyTuple_Size(descrs_tuple) != ufunc->nargs)  {
        PyErr_SetString(PyExc_TypeError,
                "resolve_dtypes: The dtypes must be a tuple of "
                "`ufunc.nargs` length.");
        goto finish;
    }
    /* Convert each entry to a 0-d dummy array + DType for promotion. */
    for (int i=0; i < ufunc->nargs; i++) {
        /*
         * We create dummy arrays for now.  It should be OK to make this
         * truly "dummy" (not even proper objects), but that is a hack better
         * left for the legacy_type_resolution wrapper when NEP 50 is done.
         */
        PyObject *descr_obj = PyTuple_GET_ITEM(descrs_tuple, i);
        PyArray_Descr *descr;
        if (PyArray_DescrCheck(descr_obj)) {
            descr = (PyArray_Descr *)descr_obj;
            Py_INCREF(descr);
            /* NewFromDescr steals the `descr` reference (even on error). */
            dummy_arrays[i] = (PyArrayObject *)PyArray_NewFromDescr_int(
                    &PyArray_Type, descr, 0, NULL, NULL, NULL,
                    0, NULL, NULL, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY);
            if (dummy_arrays[i] == NULL) {
                goto finish;
            }
            DTypes[i] = NPY_DTYPE(descr);
            Py_INCREF(DTypes[i]);
        }
        /* Explicitly allow int, float, and complex for the "weak" types. */
        else if (descr_obj == (PyObject *)&PyLong_Type) {
            descr = PyArray_DescrFromType(NPY_INTP);
            dummy_arrays[i] = (PyArrayObject *)PyArray_Empty(0, NULL, descr, 0);
            if (dummy_arrays[i] == NULL) {
                goto finish;
            }
            /* Mark as originating from a Python scalar for NEP 50 logic. */
            PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_INT);
            Py_INCREF(&PyArray_PyLongDType);
            DTypes[i] = &PyArray_PyLongDType;
            promoting_pyscalars = NPY_TRUE;
        }
        else if (descr_obj == (PyObject *)&PyFloat_Type) {
            descr = PyArray_DescrFromType(NPY_DOUBLE);
            dummy_arrays[i] = (PyArrayObject *)PyArray_Empty(0, NULL, descr, 0);
            if (dummy_arrays[i] == NULL) {
                goto finish;
            }
            PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_FLOAT);
            Py_INCREF(&PyArray_PyFloatDType);
            DTypes[i] = &PyArray_PyFloatDType;
            promoting_pyscalars = NPY_TRUE;
        }
        else if (descr_obj == (PyObject *)&PyComplex_Type) {
            descr = PyArray_DescrFromType(NPY_CDOUBLE);
            dummy_arrays[i] = (PyArrayObject *)PyArray_Empty(0, NULL, descr, 0);
            if (dummy_arrays[i] == NULL) {
                goto finish;
            }
            PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_COMPLEX);
            Py_INCREF(&PyArray_PyComplexDType);
            DTypes[i] = &PyArray_PyComplexDType;
            promoting_pyscalars = NPY_TRUE;
        }
        else if (descr_obj == Py_None) {
            /* None is only valid for outputs (or the first reduction input). */
            if (i < ufunc->nin && !(reduction && i == 0)) {
                PyErr_SetString(PyExc_TypeError,
                        "All input dtypes must be provided "
                        "(except the first one in reductions)");
                goto finish;
            }
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                    "Provided dtype must be a valid NumPy dtype, "
                    "int, float, complex, or None.");
            goto finish;
        }
    }
    PyArrayMethodObject *ufuncimpl;
    if (!reduction) {
        ufuncimpl = promote_and_get_ufuncimpl(ufunc,
                dummy_arrays, signature, DTypes, NPY_FALSE,
                promoting_pyscalars, NPY_FALSE);
        if (ufuncimpl == NULL) {
            goto finish;
        }
        /* Find the correct descriptors for the operation */
        if (resolve_descriptors(ufunc->nargs, ufunc, ufuncimpl,
                dummy_arrays, operation_descrs, signature, DTypes,
                NULL, casting) < 0) {
            goto finish;
        }
        if (validate_casting(
                ufuncimpl, ufunc, dummy_arrays, operation_descrs, casting) < 0) {
            goto finish;
        }
    }
    else {  /* reduction */
        if (signature[2] != NULL) {
            PyErr_SetString(PyExc_ValueError,
                    "Reduction signature must end with None, instead pass "
                    "the first DType in the signature.");
            goto finish;
        }
        if (dummy_arrays[2] != NULL) {
            PyErr_SetString(PyExc_TypeError,
                    "Output dtype must not be passed for reductions, "
                    "pass the first input instead.");
            goto finish;
        }
        ufuncimpl = reducelike_promote_and_resolve(ufunc,
                dummy_arrays[1], dummy_arrays[0], signature, NPY_FALSE,
                operation_descrs, casting, "resolve_dtypes");
        if (ufuncimpl == NULL) {
            goto finish;
        }
    }
    result = PyArray_TupleFromItems(
            ufunc->nargs, (PyObject **)operation_descrs, 0);
    if (result == NULL || !return_context) {
        goto finish;
    }
    /* Result will be (dtype_tuple, call_info), so move it and clear result */
    result_dtype_tuple = result;
    result = NULL;
    /* We may have to return the context: */
    ufunc_call_info *call_info;
    /* One allocation for the struct plus the trailing descriptor array. */
    call_info = PyObject_Malloc(sizeof(ufunc_call_info)
                              + ufunc->nargs * sizeof(PyArray_Descr *));
    if (call_info == NULL) {
        PyErr_NoMemory();
        goto finish;
    }
    call_info->strided_loop = NULL;
    call_info->auxdata = NULL;
    call_info->context = &call_info->_full_context;
    /*
     * We create a capsule with NumPy 1.24 in the name to signal that it is
     * prone to change in version updates (it doesn't have to).
     * This capsule is documented in the `ufunc._resolve_dtypes_and_context`
     * docstring.
     */
    PyObject *capsule = PyCapsule_New(
            call_info, "numpy_1.24_ufunc_call_info", &free_ufunc_call_info);
    if (capsule == NULL) {
        PyObject_Free(call_info);
        goto finish;
    }
    /* From here on, the capsule owns `call_info` and all its references. */
    PyArrayMethod_Context *context = call_info->context;
    Py_INCREF(ufunc);
    context->caller = (PyObject *)ufunc;
    Py_INCREF(ufuncimpl);
    context->method = ufuncimpl;
    context->descriptors = call_info->_descrs;
    for (int i=0; i < ufunc->nargs; i++) {
        Py_INCREF(operation_descrs[i]);
        ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i];
    }
    result = PyTuple_Pack(2, result_dtype_tuple, capsule);
    /* cleanup and return */
    Py_DECREF(capsule);
  finish:
    Py_XDECREF(result_dtype_tuple);
    for (int i = 0; i < ufunc->nargs; i++) {
        Py_XDECREF(signature[i]);
        Py_XDECREF(dummy_arrays[i]);
        Py_XDECREF(operation_descrs[i]);
        Py_XDECREF(DTypes[i]);
    }
    return result;
}
/* `ufunc.resolve_dtypes`: resolution only, no call-info capsule. */
static PyObject *
py_resolve_dtypes(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    return py_resolve_dtypes_generic(ufunc, NPY_FALSE, args, len_args, kwnames);
}
/* `ufunc._resolve_dtypes_and_context`: also returns the call-info capsule. */
static PyObject *
py_resolve_dtypes_and_context(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    return py_resolve_dtypes_generic(ufunc, NPY_TRUE, args, len_args, kwnames);
}
/*
 * `ufunc._get_strided_loop`: fill a call-info capsule (as returned by
 * `_resolve_dtypes_and_context`) with the low-level strided inner loop.
 *
 * `fixed_strides` may be None or a tuple of length `ufunc.nargs` whose
 * items are Python ints or None; None entries mean "unknown stride"
 * (NPY_MAX_INTP).  Each capsule can be filled only once.
 */
static PyObject *
py_get_strided_loop(PyUFuncObject *ufunc,
        PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
    NPY_PREPARE_ARGPARSER;
    PyObject *call_info_obj;
    PyObject *fixed_strides_obj = Py_None;
    npy_intp fixed_strides[NPY_MAXARGS];
    if (npy_parse_arguments("_get_strided_loop", args, len_args, kwnames,
            "", NULL, &call_info_obj,
            "$fixed_strides", NULL, &fixed_strides_obj,
            NULL, NULL, NULL) < 0) {
        return NULL;
    }
    ufunc_call_info *call_info = PyCapsule_GetPointer(
                call_info_obj, "numpy_1.24_ufunc_call_info");
    if (call_info == NULL) {
        /* Cannot have a context with NULL inside... */
        assert(PyErr_Occurred());
        return NULL;
    }
    /* A non-NULL loop means the capsule was already filled once. */
    if (call_info->strided_loop != NULL) {
        PyErr_SetString(PyExc_TypeError,
                "ufunc call_info has already been filled/used!");
        return NULL;
    }
    /* The capsule must have been created by this very ufunc. */
    if (call_info->context->caller != (PyObject *)ufunc) {
        PyErr_SetString(PyExc_TypeError,
                "calling get_strided_loop with incompatible context");
        return NULL;
    }
    /*
     * Strict conversion of fixed_strides, None, or tuple of int or None.
     */
    if (fixed_strides_obj == Py_None) {
        for (int i = 0; i < ufunc->nargs; i++) {
            fixed_strides[i] = NPY_MAX_INTP;
        }
    }
    else if (PyTuple_CheckExact(fixed_strides_obj)
            && PyTuple_Size(fixed_strides_obj) == ufunc->nargs) {
        for (int i = 0; i < ufunc->nargs; i++) {
            PyObject *stride = PyTuple_GET_ITEM(fixed_strides_obj, i);
            if (PyLong_CheckExact(stride)) {
                fixed_strides[i] = PyLong_AsSsize_t(stride);
                if (error_converting(fixed_strides[i])) {
                    return NULL;
                }
            }
            else if (stride == Py_None) {
                /* None entry: stride unknown at loop-selection time. */
                fixed_strides[i] = NPY_MAX_INTP;
            }
            else {
                PyErr_SetString(PyExc_TypeError,
                    "_get_strided_loop(): fixed_strides tuple must contain "
                    "Python ints or None");
                return NULL;
            }
        }
    }
    else {
        PyErr_SetString(PyExc_TypeError,
                "_get_strided_loop(): fixed_strides must be a tuple or None");
        return NULL;
    }
    NPY_ARRAYMETHOD_FLAGS flags;
    /* Fetch the inner loop; aligned=1, no output overlap assumed. */
    if (call_info->context->method->get_strided_loop(call_info->context,
            1, 0, fixed_strides, &call_info->strided_loop, &call_info->auxdata,
            &flags) < 0) {
        return NULL;
    }
    /* Cache the flag bits callers need to honor when invoking the loop. */
    call_info->requires_pyapi = flags & NPY_METH_REQUIRES_PYAPI;
    call_info->no_floatingpoint_errors = (
            flags & NPY_METH_NO_FLOATINGPOINT_ERRORS);
    Py_RETURN_NONE;
}
/* Method table for `numpy.ufunc` instances. */
static struct PyMethodDef ufunc_methods[] = {
    {"reduce",
        (PyCFunction)ufunc_reduce,
        METH_FASTCALL | METH_KEYWORDS, NULL },
    {"accumulate",
        (PyCFunction)ufunc_accumulate,
        METH_FASTCALL | METH_KEYWORDS, NULL },
    {"reduceat",
        (PyCFunction)ufunc_reduceat,
        METH_FASTCALL | METH_KEYWORDS, NULL },
    {"outer",
        (PyCFunction)ufunc_outer,
        METH_FASTCALL | METH_KEYWORDS, NULL},
    /* `at` still uses the older METH_VARARGS calling convention. */
    {"at",
        (PyCFunction)ufunc_at,
        METH_VARARGS, NULL},
    /* Lower level methods: */
    {"resolve_dtypes",
        (PyCFunction)py_resolve_dtypes,
        METH_FASTCALL | METH_KEYWORDS, NULL},
    /*
     * The following two functions are public API, but underscored since they
     * are C-user specific and allow direct access to the core of ufunc loops.
     * (See their documentation for API stability.)
     */
    {"_resolve_dtypes_and_context",
        (PyCFunction)py_resolve_dtypes_and_context,
        METH_FASTCALL | METH_KEYWORDS, NULL},
    {"_get_strided_loop",
        (PyCFunction)py_get_strided_loop,
        METH_FASTCALL | METH_KEYWORDS, NULL},
    {NULL, NULL, 0, NULL}           /* sentinel */
};
/*****************************************************************************
*** UFUNC GETSET ***
*****************************************************************************/
/*
 * Return the single-character type code (`descr->type`) for a type number.
 */
static char
_typecharfromnum(int num) {
    PyArray_Descr *descr = PyArray_DescrFromType(num);
    char typechar = descr->type;
    Py_DECREF(descr);
    return typechar;
}
/*
 * Getter for `ufunc.__doc__`.
 *
 * An instance-dict override (set via `ufunc.__doc__ = ...`) takes
 * precedence.  Otherwise the docstring is assembled by the Python helper
 * `numpy._core._internal._ufunc_doc_signature_formatter`, with the static
 * C-level docstring (`ufunc->doc`) appended when present.
 */
static PyObject *
ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    PyObject *doc;
    // If there is a __doc__ in the instance __dict__, use it.
    int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__doc__, &doc);
    if (result == -1) {
        return NULL;
    }
    else if (result == 1) {
        return doc;
    }
    if (npy_cache_import_runtime(
                "numpy._core._internal", "_ufunc_doc_signature_formatter",
                &npy_runtime_imports._ufunc_doc_signature_formatter) == -1) {
        return NULL;
    }
    /*
     * Put docstring first or FindMethod finds it... could so some
     * introspection on name and nin + nout to automate the first part
     * of it the doc string shouldn't need the calling convention
     */
    doc = PyObject_CallFunctionObjArgs(npy_runtime_imports.
                                       _ufunc_doc_signature_formatter,
                                       (PyObject *)ufunc, NULL);
    if (doc == NULL) {
        return NULL;
    }
    if (ufunc->doc != NULL) {
        /* Append the static C docstring below the generated signature. */
        Py_SETREF(doc, PyUnicode_FromFormat("%S\n\n%s", doc, ufunc->doc));
    }
    return doc;
}
/*
 * Setter for `ufunc.__doc__`: stores the value in the instance dict,
 * or removes the override when called for deletion (doc == NULL).
 */
static int
ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored))
{
    return (doc == NULL)
            ? PyDict_DelItem(ufunc->dict, npy_interned_str.__doc__)
            : PyDict_SetItem(ufunc->dict, npy_interned_str.__doc__, doc);
}
/* Getter for `ufunc.nin`: number of inputs. */
static PyObject *
ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    return PyLong_FromLong(ufunc->nin);
}
/* Getter for `ufunc.nout`: number of outputs. */
static PyObject *
ufunc_get_nout(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    return PyLong_FromLong(ufunc->nout);
}
/* Getter for `ufunc.nargs`: total number of operands (nin + nout). */
static PyObject *
ufunc_get_nargs(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    return PyLong_FromLong(ufunc->nargs);
}
/* Getter for `ufunc.ntypes`: number of registered (legacy) type signatures. */
static PyObject *
ufunc_get_ntypes(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    return PyLong_FromLong(ufunc->ntypes);
}
/*
 * Getter for `ufunc.types`: a list of strings such as "dd->d", one per
 * registered legacy loop, built from the flat `ufunc->types` type-number
 * array grouped as inputs "->" outputs.
 *
 * Fix: the scratch-buffer allocation and the per-entry string creation
 * were previously unchecked, dereferencing NULL on allocation failure and
 * leaving NULL slots in the returned list on string-creation failure.
 */
static PyObject *
ufunc_get_types(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    /* return a list with types grouped input->output */
    PyObject *list;
    PyObject *str;
    int k, j, n, nt = ufunc->ntypes;
    int ni = ufunc->nin;
    int no = ufunc->nout;
    char *t;
    list = PyList_New(nt);
    if (list == NULL) {
        return NULL;
    }
    /* Scratch buffer for one "in...->out..." entry. */
    t = PyArray_malloc(no+ni+2);
    if (t == NULL) {
        Py_DECREF(list);
        return PyErr_NoMemory();
    }
    n = 0;
    for (k = 0; k < nt; k++) {
        for (j = 0; j<ni; j++) {
            t[j] = _typecharfromnum(ufunc->types[n]);
            n++;
        }
        t[ni] = '-';
        t[ni+1] = '>';
        for (j = 0; j < no; j++) {
            t[ni + 2 + j] = _typecharfromnum(ufunc->types[n]);
            n++;
        }
        str = PyUnicode_FromStringAndSize(t, no + ni + 2);
        if (str == NULL) {
            PyArray_free(t);
            Py_DECREF(list);
            return NULL;
        }
        PyList_SET_ITEM(list, k, str);
    }
    PyArray_free(t);
    return list;
}
/* Getter for `ufunc.__name__`. */
static PyObject *
ufunc_get_name(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    return PyUnicode_FromString(ufunc->name);
}
/* Getter for `ufunc.identity` (reorderability info is discarded here). */
static PyObject *
ufunc_get_identity(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    npy_bool reorderable;
    return PyUFunc_GetDefaultIdentity(ufunc, &reorderable);
}
/*
 * Getter for `ufunc.signature`: the gufunc core signature string,
 * or None for ordinary (element-wise) ufuncs.
 */
static PyObject *
ufunc_get_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
{
    if (ufunc->core_enabled) {
        return PyUnicode_FromString(ufunc->core_signature);
    }
    Py_RETURN_NONE;
}
#undef _typecharfromnum
/* Getters/setters for `numpy.ufunc`; only __doc__ is writable. */
static PyGetSetDef ufunc_getset[] = {
    {"__doc__",
        (getter)ufunc_get_doc, (setter)ufunc_set_doc,
        NULL, NULL},
    {"__name__",
        (getter)ufunc_get_name,
        NULL, NULL, NULL},
    {"nin",
        (getter)ufunc_get_nin,
        NULL, NULL, NULL},
    {"nout",
        (getter)ufunc_get_nout,
        NULL, NULL, NULL},
    {"nargs",
        (getter)ufunc_get_nargs,
        NULL, NULL, NULL},
    {"ntypes",
        (getter)ufunc_get_ntypes,
        NULL, NULL, NULL},
    {"types",
        (getter)ufunc_get_types,
        NULL, NULL, NULL},
    {"identity",
        (getter)ufunc_get_identity,
        NULL, NULL, NULL},
    {"signature",
        (getter)ufunc_get_signature,
        NULL, NULL, NULL},
    // __signature__ stored in `__dict__`, see `_globals._SignatureDescriptor`
    {NULL, NULL, NULL, NULL, NULL},  /* Sentinel */
};
/******************************************************************************
*** UFUNC MEMBERS ***
*****************************************************************************/
/* Members: expose the instance dict read-only (writes go through getset). */
static PyMemberDef ufunc_members[] = {
        {"__dict__", T_OBJECT, offsetof(PyUFuncObject, dict),
         READONLY},
        {NULL},
};
/******************************************************************************
*** UFUNC TYPE OBJECT ***
*****************************************************************************/
/* Type object for `numpy.ufunc`; calls dispatch through the vectorcall slot. */
NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "numpy.ufunc",
    .tp_basicsize = sizeof(PyUFuncObject),
    .tp_dealloc = (destructor)ufunc_dealloc,
    .tp_vectorcall_offset = offsetof(PyUFuncObject, vectorcall),
    .tp_repr = (reprfunc)ufunc_repr,
    /* tp_call forwards to the vectorcall implementation. */
    .tp_call = &PyVectorcall_Call,
    .tp_str = (reprfunc)ufunc_repr,
    .tp_flags = Py_TPFLAGS_DEFAULT |
        _Py_TPFLAGS_HAVE_VECTORCALL |
        Py_TPFLAGS_HAVE_GC,
    .tp_traverse = (traverseproc)ufunc_traverse,
    .tp_methods = ufunc_methods,
    .tp_getset = ufunc_getset,
    .tp_getattro = PyObject_GenericGetAttr,
    .tp_setattro = PyObject_GenericSetAttr,
    // TODO when Python 3.12 is the minimum supported version,
    // use Py_TPFLAGS_MANAGED_DICT
    .tp_members = ufunc_members,
    .tp_dictoffset = offsetof(PyUFuncObject, dict),
};
/* End of code for ufunc objects */ | c | github | https://github.com/numpy/numpy | numpy/_core/src/umath/ufunc_object.c |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Analyzer CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def no_rewrite_session_config():
  """Build a ConfigProto with grappler model pruning and constant folding off."""
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewriter_config_pb2.RewriterConfig(
          disable_model_pruning=True,
          constant_folding=rewriter_config_pb2.RewriterConfig.OFF))
  return config_pb2.ConfigProto(graph_options=graph_options)
def line_number_above():
  """Return the source line number immediately above the caller's call site."""
  return tf_inspect.stack()[1][2] - 1
def parse_op_and_node(line):
  """Parse a line containing an op node followed by a node name.

  For example, if the line is
    "  [Variable] hidden/weights",
  this function will return ("Variable", "hidden/weights")

  Args:
    line: The line to be parsed, as a str.

  Returns:
    Name of the parsed op type.
    Name of the parsed node.
  """
  tokens = line.strip().split(" ")
  # The op type is wrapped in square brackets, e.g., "[Variable]".
  op_type = tokens[0].replace("[", "").replace("]", "")
  # Index 1 (rather than -1) tolerates any extra items behind the node name.
  node_name = tokens[1]
  return op_type, node_name
def assert_column_header_command_shortcut(tst,
                                          command,
                                          reverse,
                                          node_name_regex,
                                          op_type_regex,
                                          tensor_filter_name):
  """Assert a column-header sort-shortcut command omits inapplicable flags.

  Each check asserts that when a given filter/option is *not* in effect, the
  shortcut command embedded in the column header does not carry the
  corresponding flag-plus-value substring.
  """
  tst.assertFalse(reverse and "-r" in command)
  tst.assertFalse(not(op_type_regex) and ("-t %s" % op_type_regex) in command)
  # NOTE(review): the two checks below reuse the "-t" flag string; this looks
  # like a copy-paste of the op-type check above — node-name regex and tensor
  # filter presumably use different flags ("-n" / "-f"). Confirm against the
  # analyzer_cli `list_tensors` flag definitions before changing.
  tst.assertFalse(
      not(node_name_regex) and ("-t %s" % node_name_regex) in command)
  tst.assertFalse(
      not(tensor_filter_name) and ("-t %s" % tensor_filter_name) in command)
def assert_listed_tensors(tst,
                          out,
                          expected_tensor_names,
                          expected_op_types,
                          node_name_regex=None,
                          op_type_regex=None,
                          tensor_filter_name=None,
                          sort_by="timestamp",
                          reverse=False):
  """Check RichTextLines output for list_tensors commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    expected_tensor_names: (list of str) Expected tensor names in the list.
    expected_op_types: (list of str) Expected op types of the tensors, in the
      same order as the expected_tensor_names.
    node_name_regex: Optional: node name regex filter.
    op_type_regex: Optional: op type regex filter.
    tensor_filter_name: Optional: name of the tensor filter.
    sort_by: (str) (timestamp | op_type | tensor_name) the field by which the
      tensors in the list are sorted.
    reverse: (bool) whether the sorting is in reverse (i.e., descending) order.
  """
  line_iter = iter(out.lines)
  attr_segs = out.font_attr_segs
  # Track the absolute line index so we can look up font attribute segments
  # for the column-header line later.
  line_counter = 0
  num_tensors = len(expected_tensor_names)
  if tensor_filter_name is None:
    tst.assertEqual("%d dumped tensor(s):" % num_tensors, next(line_iter))
  else:
    tst.assertEqual("%d dumped tensor(s) passing filter \"%s\":" %
                    (num_tensors, tensor_filter_name), next(line_iter))
  line_counter += 1
  # Filter banner lines only appear when the corresponding filter is active.
  if op_type_regex is not None:
    tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex,
                    next(line_iter))
    line_counter += 1
  if node_name_regex is not None:
    tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex,
                    next(line_iter))
    line_counter += 1
  tst.assertEqual("", next(line_iter))
  line_counter += 1
  # Verify the column heads "t (ms)", "Op type" and "Tensor name" are present.
  line = next(line_iter)
  tst.assertIn("t (ms)", line)
  tst.assertIn("Op type", line)
  tst.assertIn("Tensor name", line)
  # Verify the command shortcuts in the top row.
  # (This rebinds `attr_segs` from the whole-output dict to the segments of
  # the header line only.)
  attr_segs = out.font_attr_segs[line_counter]
  attr_seg = attr_segs[0]
  tst.assertEqual(0, attr_seg[0])
  tst.assertEqual(len("t (ms)"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s timestamp", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Size")
  attr_seg = attr_segs[1]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s dump_size", command)
  assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,
                                        op_type_regex, tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Op type")
  attr_seg = attr_segs[2]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s op_type", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Tensor name")
  attr_seg = attr_segs[3]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s tensor_name", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  # Verify the listed tensors and their timestamps.
  tensor_timestamps = []
  dump_sizes_bytes = []
  op_types = []
  tensor_names = []
  for line in line_iter:
    # Each data row is "[t] size op_type tensor_name"; collapse repeated
    # spaces by dropping empty split items.
    items = line.split(" ")
    items = [item for item in items if item]
    rel_time = float(items[0][1:-1])
    tst.assertGreaterEqual(rel_time, 0.0)
    tensor_timestamps.append(rel_time)
    dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))
    op_types.append(items[2])
    tensor_names.append(items[3])
  # Verify that the tensors should be listed in ascending order of their
  # timestamps.
  if sort_by == "timestamp":
    sorted_timestamps = sorted(tensor_timestamps)
    if reverse:
      sorted_timestamps.reverse()
    tst.assertEqual(sorted_timestamps, tensor_timestamps)
  elif sort_by == "dump_size":
    sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)
    if reverse:
      sorted_dump_sizes_bytes.reverse()
    tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)
  elif sort_by == "op_type":
    sorted_op_types = sorted(op_types)
    if reverse:
      sorted_op_types.reverse()
    tst.assertEqual(sorted_op_types, op_types)
  elif sort_by == "tensor_name":
    sorted_tensor_names = sorted(tensor_names)
    if reverse:
      sorted_tensor_names.reverse()
    tst.assertEqual(sorted_tensor_names, tensor_names)
  else:
    tst.fail("Invalid value in sort_by: %s" % sort_by)
  # Verify that the tensors are all listed.
  for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):
    tst.assertIn(tensor_name, tensor_names)
    index = tensor_names.index(tensor_name)
    tst.assertEqual(op_type, op_types[index])
def assert_node_attribute_lines(tst,
                                out,
                                node_name,
                                op_type,
                                device,
                                input_op_type_node_name_pairs,
                                ctrl_input_op_type_node_name_pairs,
                                recipient_op_type_node_name_pairs,
                                ctrl_recipient_op_type_node_name_pairs,
                                attr_key_val_pairs=None,
                                num_dumped_tensors=None,
                                show_stack_trace=False,
                                stack_trace_available=False):
  """Check RichTextLines output for node_info commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    node_name: Name of the node.
    op_type: Op type of the node, as a str.
    device: Name of the device on which the node resides.
    input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,
      for the (non-control) inputs to the node.
    ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the control inputs to the node.
    recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the (non-control) output recipients to the node.
    ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and
      node name, for the control output recipients to the node.
    attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a
      list of 2-tuples.
    num_dumped_tensors: Optional: number of tensor dumps from the node.
    show_stack_trace: (bool) whether the stack trace of the node's
      construction is asserted to be present.
    stack_trace_available: (bool) whether Python stack trace is available.
  """
  line_iter = iter(out.lines)
  tst.assertEqual("Node %s" % node_name, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual("  Op: %s" % op_type, next(line_iter))
  tst.assertEqual("  Device: %s" % device, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual("  %d input(s) + %d control input(s):" %
                  (len(input_op_type_node_name_pairs),
                   len(ctrl_input_op_type_node_name_pairs)), next(line_iter))
  # Check inputs.
  tst.assertEqual("    %d input(s):" % len(input_op_type_node_name_pairs),
                  next(line_iter))
  for op_type, node_name in input_op_type_node_name_pairs:
    tst.assertEqual("      [%s] %s" % (op_type, node_name), next(line_iter))
  tst.assertEqual("", next(line_iter))
  # Check control inputs.
  if ctrl_input_op_type_node_name_pairs:
    tst.assertEqual("    %d control input(s):" %
                    len(ctrl_input_op_type_node_name_pairs), next(line_iter))
    for op_type, node_name in ctrl_input_op_type_node_name_pairs:
      tst.assertEqual("      [%s] %s" % (op_type, node_name), next(line_iter))
    tst.assertEqual("", next(line_iter))
  tst.assertEqual("  %d recipient(s) + %d control recipient(s):" %
                  (len(recipient_op_type_node_name_pairs),
                   len(ctrl_recipient_op_type_node_name_pairs)),
                  next(line_iter))
  # Check recipients, the order of which is not deterministic.
  tst.assertEqual("    %d recipient(s):" %
                  len(recipient_op_type_node_name_pairs), next(line_iter))
  t_recs = []
  for _ in recipient_op_type_node_name_pairs:
    line = next(line_iter)
    op_type, node_name = parse_op_and_node(line)
    t_recs.append((op_type, node_name))
  tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)
  # Check control recipients, the order of which is not deterministic.
  if ctrl_recipient_op_type_node_name_pairs:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("    %d control recipient(s):" %
                    len(ctrl_recipient_op_type_node_name_pairs),
                    next(line_iter))
    t_ctrl_recs = []
    for _ in ctrl_recipient_op_type_node_name_pairs:
      line = next(line_iter)
      op_type, node_name = parse_op_and_node(line)
      t_ctrl_recs.append((op_type, node_name))
    tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)
  # The order of multiple attributes can be non-deterministic.
  if attr_key_val_pairs:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("Node attributes:", next(line_iter))
    kv_pairs = []
    # Fix: the loop previously unpacked `for key, val in attr_key_val_pairs`
    # only to immediately overwrite both names; iterate anonymously since the
    # pairs are only used to count the expected number of key/value lines.
    for _ in attr_key_val_pairs:
      key = next(line_iter).strip().replace(":", "")
      val = next(line_iter).strip()
      kv_pairs.append((key, val))
      tst.assertEqual("", next(line_iter))
    tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)
  if num_dumped_tensors is not None:
    tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors,
                    next(line_iter))
    tst.assertEqual("", next(line_iter))
    dump_timestamps_ms = []
    for _ in xrange(num_dumped_tensors):
      line = next(line_iter)
      tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @")
      tst.assertTrue(line.strip().endswith(" ms"))
      dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", ""))
      tst.assertGreaterEqual(dump_timestamp_ms, 0.0)
      dump_timestamps_ms.append(dump_timestamp_ms)
    # Dumps must be listed in chronological order.
    tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)
  if show_stack_trace:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("Traceback of node construction:", next(line_iter))
    if stack_trace_available:
      try:
        depth_counter = 0
        while True:
          # Each stack frame is rendered as 5 lines:
          # depth, Line, Function, Text, and a blank separator.
          for i in range(5):
            line = next(line_iter)
            if i == 0:
              tst.assertEqual(depth_counter, int(line.split(":")[0]))
            elif i == 1:
              tst.assertStartsWith(line, "  Line:")
            elif i == 2:
              tst.assertStartsWith(line, "  Function:")
            elif i == 3:
              tst.assertStartsWith(line, "  Text:")
            elif i == 4:
              tst.assertEqual("", line)
          depth_counter += 1
      except StopIteration:
        # Output must end exactly at a frame boundary.
        tst.assertEqual(0, i)
    else:
      tst.assertEqual("(Unavailable because no Python graph has been loaded)",
                      next(line_iter))
def check_syntax_error_output(tst, out, command_prefix):
  """Check RichTextLines output for valid command prefix but invalid syntax."""
  expected_lines = [
      "Syntax error for command: %s" % command_prefix,
      "For help, do \"help %s\"" % command_prefix,
  ]
  tst.assertEqual(expected_lines, out.lines)
def check_error_output(tst, out, command_prefix, args):
  """Check RichTextLines output from invalid/erroneous commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    command_prefix: The command prefix of the command that caused the error.
    args: The arguments (excluding prefix) of the command that caused the error.
  """
  full_command = "%s %s" % (command_prefix, " ".join(args))
  tst.assertGreater(len(out.lines), 2)
  tst.assertStartsWith(
      out.lines[0],
      "Error occurred during handling of command: " + full_command)
def check_main_menu(tst,
                    out,
                    list_tensors_enabled=False,
                    node_info_node_name=None,
                    print_tensor_node_name=None,
                    list_inputs_node_name=None,
                    list_outputs_node_name=None):
  """Check the main menu annotation of an output.

  Each `*_node_name` argument, when truthy, asserts that the corresponding
  menu item is enabled and that its command ends with that node name;
  when falsy, the item is asserted to be disabled.  `run_info` and `help`
  are always expected to be enabled.
  """
  tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)
  menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]
  tst.assertEqual(list_tensors_enabled,
                  menu.caption_to_item("list_tensors").is_enabled())
  menu_item = menu.caption_to_item("node_info")
  if node_info_node_name:
    tst.assertTrue(menu_item.is_enabled())
    tst.assertTrue(menu_item.content.endswith(node_info_node_name))
  else:
    tst.assertFalse(menu_item.is_enabled())
  menu_item = menu.caption_to_item("print_tensor")
  if print_tensor_node_name:
    tst.assertTrue(menu_item.is_enabled())
    tst.assertTrue(menu_item.content.endswith(print_tensor_node_name))
  else:
    tst.assertFalse(menu_item.is_enabled())
  menu_item = menu.caption_to_item("list_inputs")
  if list_inputs_node_name:
    tst.assertTrue(menu_item.is_enabled())
    tst.assertTrue(menu_item.content.endswith(list_inputs_node_name))
  else:
    tst.assertFalse(menu_item.is_enabled())
  menu_item = menu.caption_to_item("list_outputs")
  if list_outputs_node_name:
    tst.assertTrue(menu_item.is_enabled())
    tst.assertTrue(menu_item.content.endswith(list_outputs_node_name))
  else:
    tst.assertFalse(menu_item.is_enabled())
  tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
  tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
                    expected_command):
  """Assert that a line carries a MenuItem with the expected span and command.

  Scans the font attribute segments of the line at `line_index` for the first
  segment that carries a `debugger_cli_common.MenuItem` attribute and checks
  its begin/end columns and command content.
  """
  found_menu_item = False
  for begin, end, attribute in out.font_attr_segs[line_index]:
    # A segment's attribute entry may be a single attribute or a list.
    if not isinstance(attribute, list):
      attribute = [attribute]
    menu_items = [
        attr for attr in attribute
        if isinstance(attr, debugger_cli_common.MenuItem)
    ]
    if not menu_items:
      continue
    tst.assertEqual(expected_begin, begin)
    tst.assertEqual(expected_end, end)
    tst.assertEqual(expected_command, menu_items[0].content)
    found_menu_item = True
    break
  tst.assertTrue(found_menu_item)
def create_analyzer_cli(dump):
  """Create an analyzer CLI.

  Args:
    dump: A `DebugDumpDir` object to base the analyzer CLI on.

  Returns:
    1) A `DebugAnalyzer` object created based on `dump`.
    2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
       and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.
  """
  # Construct the analyzer and the handler registry it feeds.
  analyzer = analyzer_cli.DebugAnalyzer(dump)
  registry = debugger_cli_common.CommandHandlerRegistry()

  # (command name, handler, short prefix alias) triples, registered in order.
  command_specs = (
      ("list_tensors", analyzer.list_tensors, "lt"),
      ("node_info", analyzer.node_info, "ni"),
      ("list_inputs", analyzer.list_inputs, "li"),
      ("list_outputs", analyzer.list_outputs, "lo"),
      ("print_tensor", analyzer.print_tensor, "pt"),
      ("print_source", analyzer.print_source, "ps"),
      ("list_source", analyzer.list_source, "ls"),
      ("eval", analyzer.evaluate_expression, "ev"),
  )
  for command_name, handler, alias in command_specs:
    registry.register_command_handler(
        command_name,
        handler,
        analyzer.get_help(command_name),
        prefix_aliases=[alias])

  return analyzer, registry
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._dump_root_for_unique = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
cls._curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
cls._sess = session.Session(config=no_rewrite_session_config())
with cls._sess as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
u_name = "simple_mul_add/u"
v_name = "simple_mul_add/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
u = variables.Variable(u_init, name=u_name)
cls._u_line_number = line_number_above()
v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
v = variables.Variable(v_init, name=v_name)
cls._v_line_number = line_number_above()
w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
cls._w_line_number = line_number_above()
x = math_ops.add(w, w, name="simple_mul_add/add")
cls._x_line_number = line_number_above()
a = variables.Variable([1, 3, 3, 7], name="a")
u.initializer.run()
v.initializer.run()
a.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run([x], options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
shutil.rmtree(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
dump = self._debug_dump.dumped_tensor_data[0]
self.assertLess(dump.dump_size_bytes, 1000)
self.assertEqual(
"VariableV2", self._debug_dump.node_op_type(dump.node_name))
_, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([dump]))
# The length of str(dump.dump_size_bytes) is less than the length of
# "Size (B)" (8). So the column width should be determined by the length of
# "Size (B)".
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
# The length of "VariableV2" is greater than the length of "Op type". So the
# column should be determined by the length of "VariableV2".
self.assertEqual(len("VariableV2") + 1, op_type_col_width)
def testListTensors(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", [])
assert_listed_tensors(self, out, [
"simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
"simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
"simple_mul_add/add:0"
], ["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"])
# Check the main menu.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="timestamp",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
def testListTensorsInOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--node_name_filter", ".*read.*"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
node_name_regex=".*read.*")
out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
assert_listed_tensors(self, out, [], [], node_name_regex="^read")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--op_type_filter", "Identity"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
op_type_regex="Identity")
out = self._registry.dispatch_command("list_tensors",
["-t", "(Add|MatMul)"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
["Add", "MatMul"],
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0"], ["Add"],
node_name_regex=".*add$",
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
"""Test register and invoke a tensor filter."""
# First, register the filter.
self._analyzer.add_tensor_filter("has_inf_or_nan",
debug_data.has_inf_or_nan)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
# This TF graph run did not generate any bad numerical values.
assert_listed_tensors(
self, out, [], [], tensor_filter_name="has_inf_or_nan")
# TODO(cais): A test with some actual bad numerical values.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
def testNodeInfoByNodeName(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", [node_name])
recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]
assert_node_attribute_lines(self, out, node_name, "MatMul",
self._main_device,
[("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
recipients, [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name is bold in the first line.
self.assertEqual(
[(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
out.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-a", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
attr_key_val_pairs=[("transpose_a", "b: false"),
("transpose_b", "b: false"),
("T", "type: DT_DOUBLE")])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-d", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
num_dumped_tensors=1)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 16,
len(out.lines[16]) - len(out.lines[16].strip()),
len(out.lines[16]), "pt %s:0 -n 0" % node_name)
def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
self._debug_dump.set_python_graph(None)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=False)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowStackTraceAvailableWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=True)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoByTensorName(self):
node_name = "simple_mul_add/u/read"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("node_info", [tensor_name])
assert_node_attribute_lines(self, out, node_name, "Identity",
self._main_device,
[("VariableV2", "simple_mul_add/u")], [],
[("MatMul", "simple_mul_add/matmul")], [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoNonexistentNodeName(self):
out = self._registry.dispatch_command("node_info", ["bar"])
self.assertEqual(
["ERROR: There is no node named \"bar\" in the partition graphs"],
out.lines)
# Check color indicating error.
self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)
check_main_menu(self, out, list_tensors_enabled=True)
def testPrintTensor(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRanges(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([[-inf, -5.5], [5.5, inf]]): "
"1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
self.assertNotIn(5, out.font_attr_segs)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]", "-s"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"Numeric summary:",
"| - + | total |",
"| 1 1 | 2 |",
"| min max mean std |",
"| -2.0 7.0 2.5 4.5 |",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(10, out.annotations)
self.assertIn(11, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[11])
def testPrintTensorWithSlicing(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, " dtype: float64",
" shape: (1,)", "", "array([-2.])"
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidSlicingString(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})
self.assertEqual("Error occurred during handling of command: print_tensor "
+ tensor_name + "[1, foo()]:", out.lines[0])
self.assertEqual("ValueError: Invalid tensor-slicing string.",
out.lines[-2])
def testPrintTensorValidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})
self.assertEqual([
"ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
"which generated one dump."
], out.lines)
self.assertNotIn("tensor_metadata", out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("print_tensor", [node_name])
self.assertEqual([
"Tensor \"%s:0:DebugIdentity\":" % node_name, " dtype: float64",
" shape: (2, 1)", "", "array([[ 7.],", " [-2.]])"
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorNonexistentNodeName(self):
out = self._registry.dispatch_command(
"print_tensor", ["simple_mul_add/matmul/foo:0"])
self.assertEqual([
"ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
"graphs"
], out.lines)
check_main_menu(self, out, list_tensors_enabled=True)
def testEvalExpression(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"eval", ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name)],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
"",
"Numeric summary:",
"| - + | total |",
"| 2 2 | 4 |",
"| min max mean std |",
"| -14.0 49.0 6.25 25.7524270701 |",
"",
"array([[ 49., -14.],",
" [-14., 4.]])"], out.lines)
def testAddGetTensorFilterLambda(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddGetTensorFilterNestedFunction(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
def foo_filter(unused_arg_0, unused_arg_1):
return True
analyzer.add_tensor_filter("foo_filter", foo_filter)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddTensorFilterEmptyName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(ValueError,
"Input argument filter_name cannot be empty."):
analyzer.add_tensor_filter("", lambda datum, tensor: True)
def testAddTensorFilterNonStrName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(
TypeError,
"Input argument filter_name is expected to be str, ""but is not"):
analyzer.add_tensor_filter(1, lambda datum, tensor: True)
def testAddGetTensorFilterNonCallable(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(
TypeError, "Input argument filter_callable is expected to be callable, "
"but is not."):
analyzer.add_tensor_filter("foo_filter", "bar")
def testGetNonexistentTensorFilter(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
with self.assertRaisesRegexp(ValueError,
"There is no tensor filter named \"bar\""):
analyzer.get_tensor_filter("bar")
def _findSourceLine(self, annotated_source, line_number):
"""Find line of given line number in annotated source.
Args:
annotated_source: (debugger_cli_common.RichTextLines) the annotated source
line_number: (int) 1-based line number
Returns:
(int) If line_number is found, 0-based line index in
annotated_source.lines. Otherwise, None.
"""
index = None
for i, line in enumerate(annotated_source.lines):
if line.startswith("L%d " % line_number):
index = i
break
return index
def testPrintSourceForOpNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source", [self._curr_file_path], screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
# Verify the annotation of the line that creates v.
index = self._findSourceLine(out, self._v_line_number)
self.assertEqual(
["L%d v = variables.Variable(v_init, name=v_name)" %
self._v_line_number,
" simple_mul_add/v"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/v",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates w.
index = self._findSourceLine(out, self._w_line_number)
self.assertEqual(
["L%d " % self._w_line_number +
"w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")",
" simple_mul_add/matmul"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/matmul",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates x.
index = self._findSourceLine(out, self._x_line_number)
self.assertEqual(
["L%d " % self._x_line_number +
"x = math_ops.add(w, w, name=\"simple_mul_add/add\")",
" simple_mul_add/add"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/add",
out.font_attr_segs[index + 1][0][2].content)
def testPrintSourceForTensorNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "--tensors"],
screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u/read:0",
" simple_mul_add/u:0"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u/read:0",
out.font_attr_segs[index + 1][0][2].content)
self.assertEqual("pt simple_mul_add/u:0",
out.font_attr_segs[index + 2][0][2].content)
def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-b", "3"],
screen_info={"cols": 80})
self.assertEqual(
2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-m", "1"],
screen_info={"cols": 80})
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" (... Omitted 2 of 3 op(s) ...) +5"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
more_elements_command = out.font_attr_segs[index + 2][-1][2].content
self.assertStartsWith(more_elements_command,
"ps %s " % self._curr_file_path)
self.assertIn(" -m 6", more_elements_command)
def testListSourceWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", [])
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"")
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", "^$"])
self.assertEqual([
"List of source files that created nodes in this run",
"Node name regex filter: \"^$\"", "",
"[No source file information.]"], out.lines)
def testListSourceWithPathAndNodeNameFiltersWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"list_source", ["-p", self._curr_file_path, "-n", ".*read"])
self.assertEqual([
"List of source files that created nodes in this run",
"File path regex filter: \"%s\"" % self._curr_file_path,
"Node name regex filter: \".*read\"", ""], out.lines[:4])
def testListSourceWithCompiledPythonSourceWorks(self):
def fake_list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
del dump, path_regex_whitelist, node_name_regex_whitelist
return [("compiled_1.pyc", False, 10, 20, 30, 4),
("compiled_2.pyo", False, 10, 20, 30, 5),
("uncompiled.py", False, 10, 20, 30, 6)]
with test.mock.patch.object(
source_utils, "list_source_files_against_dump",
side_effect=fake_list_source_files_against_dump):
out = self._registry.dispatch_command("list_source", [])
self.assertStartsWith(out.lines[4], "compiled_1.pyc")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[4][0])
self.assertStartsWith(out.lines[5], "compiled_2.pyo")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[5][0])
self.assertStartsWith(out.lines[6], "uncompiled.py")
self.assertEqual(0, out.font_attr_segs[6][0][0])
self.assertEqual(13, out.font_attr_segs[6][0][1])
self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])
self.assertEqual("ps uncompiled.py -b 6",
out.font_attr_segs[6][0][2][1].content)
  def testListInputInvolvingNodesWithMultipleOutputs(self):
    """List an input tree containing tensors from non-:0 output slot."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      x = variables.Variable([1, 3, 3, 7], name="x")
      # tf.unique returns two outputs; idx comes from output slot 1.
      _, idx = array_ops.unique(x, name="x_unique")
      idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two")
      sess.run(x.initializer)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % self._dump_root_for_unique)
      run_metadata = config_pb2.RunMetadata()
      self.assertAllEqual(
          [0, 2, 2, 4],
          sess.run(idx_times_two,
                   options=run_options,
                   run_metadata=run_metadata))
      debug_dump = debug_data.DebugDumpDir(
          self._dump_root_for_unique,
          partition_graphs=run_metadata.partition_graphs)
      _, registry = create_analyzer_cli(debug_dump)
      out = registry.dispatch_command("li", ["idx_times_two"])
      # The input listing must show the :1 output slot explicitly.
      self.assertEqual(
          ["Inputs to node \"idx_times_two\" (Depth limit = 1):",
           "|- (1) x_unique:1"], out.lines[:2])
class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
  """Tests print_tensor behavior for tensors above the ellipsis threshold."""
  @classmethod
  def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()
    with session.Session(config=no_rewrite_session_config()) as sess:
      # 2400 elements should exceed the default threshold (2000).
      x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(x, options=run_options, run_metadata=run_metadata)
      cls._debug_dump = debug_data.DebugDumpDir(
          cls._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Construct the analyzer and command registry.
      cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)
  def testPrintLargeTensorWithoutAllOption(self):
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})
    # Assert that ellipses are present in the tensor value printout.
    self.assertIn("...,", out.lines[4])
    # 2100 still exceeds 2000.
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]"],
        screen_info={"cols": 80})
    self.assertIn("...,", out.lines[4])
  def testPrintLargeTensorWithAllOption(self):
    # -a / --all disables summarization even above the threshold.
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0", "-a"],
        screen_info={"cols": 80})
    # Assert that ellipses are not present in the tensor value printout.
    self.assertNotIn("...,", out.lines[4])
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]", "--all"],
        screen_info={"cols": 80})
    self.assertNotIn("...,", out.lines[4])
class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
  """Tests analyzer CLI commands on a graph with control dependencies."""
  @classmethod
  def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()
    cls._is_gpu_available = test.is_gpu_available()
    if cls._is_gpu_available:
      gpu_name = test_util.gpu_device_name()
      cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
    else:
      cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
    with session.Session(config=no_rewrite_session_config()) as sess:
      x_init_val = np.array([5.0, 3.0])
      x_init = constant_op.constant(x_init_val, shape=[2])
      x = variables.Variable(x_init, name="control_deps/x")
      y = math_ops.add(x, x, name="control_deps/y")
      # ctrl_dep_y/ctrl_dep_z add explicit control edges from x (and y).
      y = control_flow_ops.with_dependencies(
          [x], y, name="control_deps/ctrl_dep_y")
      z = math_ops.multiply(x, y, name="control_deps/z")
      z = control_flow_ops.with_dependencies(
          [x, y], z, name="control_deps/ctrl_dep_z")
      x.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)
      debug_dump = debug_data.DebugDumpDir(
          cls._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Construct the analyzer and command handler registry.
      _, cls._registry = create_analyzer_cli(debug_dump)
  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)
  def testNodeInfoWithControlDependencies(self):
    # Call node_info on a node with control inputs.
    out = self._registry.dispatch_command("node_info",
                                          ["control_deps/ctrl_dep_y"])
    assert_node_attribute_lines(
        self, out, "control_deps/ctrl_dep_y", "Identity",
        self._main_device, [("Add", "control_deps/y")],
        [("VariableV2", "control_deps/x")],
        [("Mul", "control_deps/z")],
        [("Identity", "control_deps/ctrl_dep_z")])
    # Call node info on a node with control recipients.
    out = self._registry.dispatch_command("ni", ["control_deps/x"])
    assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
                                self._main_device, [], [],
                                [("Identity", "control_deps/x/read")],
                                [("Identity", "control_deps/ctrl_dep_y"),
                                 ("Identity", "control_deps/ctrl_dep_z")])
    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x/read"),
                    len(out.lines[10]), "ni -a -d -t control_deps/x/read")
    # The order of the two control recipients in the listing is not
    # deterministic; detect which line holds which node before checking.
    if out.lines[13].endswith("control_deps/ctrl_dep_y"):
      y_line = 13
      z_line = 14
    else:
      y_line = 14
      z_line = 13
    check_menu_item(self, out, y_line,
                    len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[y_line]),
                    "ni -a -d -t control_deps/ctrl_dep_y")
    check_menu_item(self, out, z_line,
                    len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[z_line]),
                    "ni -a -d -t control_deps/ctrl_dep_z")
  def testListInputsNonRecursiveNoControl(self):
    """List inputs non-recursively, without any control inputs."""
    # Do not include node op types.
    node_name = "control_deps/z"
    out = self._registry.dispatch_command("list_inputs", [node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)
    # Include node op types.
    out = self._registry.dispatch_command("li", ["-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) [Identity] control_deps/x/read", "| |- ...",
        "|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d.", " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    # Verify that the node name has bold attribute.
    self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])
    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
  def testListInputsNonRecursiveNoControlUsingTensorName(self):
    """List inputs using the name of an output tensor of the node."""
    # Do not include node op types.
    node_name = "control_deps/z"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command("list_inputs", [tensor_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
  def testListInputsNonRecursiveWithControls(self):
    """List inputs non-recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/x"),
                    len(out.lines[5]), "li -c -r control_deps/x")
  def testListInputsRecursiveWithControls(self):
    """List inputs recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read",
        "| | |- (3) [VariableV2] control_deps/x",
        "| |- (2) [Identity] control_deps/ctrl_dep_y",
        "| |- (3) [Add] control_deps/y",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | | |- (5) [VariableV2] control_deps/x",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | |- (5) [VariableV2] control_deps/x",
        "| |- (3) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | | |- (4) [VariableV2] control_deps/x",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | |- (4) [VariableV2] control_deps/x",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 11,
                    len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 18,
                    len(out.lines[18]) - len("control_deps/x"),
                    len(out.lines[18]), "li -c -r control_deps/x")
  def testListInputsRecursiveWithControlsWithDepthLimit(self):
    """List inputs recursively, with control inputs and a depth limit."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command(
        "li", ["-c", "-r", "-t", "-d", "2", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 2, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read", "| | |- ...",
        "| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y", "| | |- ...",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x"),
                    len(out.lines[10]), "li -c -r control_deps/x")
  def testListInputsNodeWithoutInputs(self):
    """List the inputs to a node without any input."""
    node_name = "control_deps/x"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
        "inputs included):", " [None]", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testListInputsNonexistentNode(self):
    # A nonexistent node name must produce an error line, not a crash.
    out = self._registry.dispatch_command(
        "list_inputs", ["control_deps/z/foo"])
    self.assertEqual([
        "ERROR: There is no node named \"control_deps/z/foo\" in the "
        "partition graphs"], out.lines)
  def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
    """List recipients recursively, with control inputs and a depth limit."""
    out = self._registry.dispatch_command(
        "lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])
    self.assertEqual([
        "Recipients of node \"control_deps/x\" (Depth limit = 1, control "
        "recipients included):",
        "|- (1) [Identity] control_deps/x/read",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
        "", "Legend:", " (d): recursion depth = d.",
        " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."], out.lines)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "lo -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")
    # Verify the bold attribute of the node name.
    self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
                     out.font_attr_segs[0])
class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
  """Tests print_tensor on a tensor that is dumped once per loop iteration."""
  @classmethod
  def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()
    with session.Session(config=no_rewrite_session_config()) as sess:
      # A 0..9 counter loop; parallel_iterations=1 keeps dumps ordered.
      loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
      cond = lambda loop_var: math_ops.less(loop_var, 10)
      body = lambda loop_var: math_ops.add(loop_var, 1)
      while_loop = control_flow_ops.while_loop(
          cond, body, [loop_var], parallel_iterations=1)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_url = "file://%s" % cls._dump_root
      watch_opts = run_options.debug_options.debug_tensor_watch_opts
      # Add debug tensor watch for "while/Identity".
      watch = watch_opts.add()
      watch.node_name = "while/Identity"
      watch.output_slot = 0
      watch.debug_ops.append("DebugIdentity")
      watch.debug_urls.append(debug_url)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(while_loop, options=run_options, run_metadata=run_metadata)
      cls._debug_dump = debug_data.DebugDumpDir(
          cls._dump_root, partition_graphs=run_metadata.partition_graphs)
      cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)
  def testMultipleDumpsPrintTensorNoNumber(self):
    # Without -n, pt lists all 10 dumps and a hint about the -n flag.
    output = self._registry.dispatch_command("pt", ["while/Identity:0"])
    self.assertEqual("Tensor \"while/Identity:0\" generated 10 dumps:",
                     output.lines[0])
    for i in xrange(10):
      self.assertTrue(output.lines[i + 1].startswith("#%d" % i))
      self.assertTrue(output.lines[i + 1].endswith(
          " ms] while/Identity:0:DebugIdentity"))
    self.assertEqual(
        "You can use the -n (--number) flag to specify which dump to print.",
        output.lines[-3])
    self.assertEqual("For example:", output.lines[-2])
    self.assertEqual("  print_tensor while/Identity:0 -n 0", output.lines[-1])
  def testMultipleDumpsPrintTensorWithNumber(self):
    # -n i prints dump #i; the loop counter value equals the dump index.
    for i in xrange(5):
      output = self._registry.dispatch_command(
          "pt", ["while/Identity:0", "-n", "%d" % i])
      self.assertEqual("Tensor \"while/Identity:0:DebugIdentity (dump #%d)\":" %
                       i, output.lines[0])
      self.assertEqual("  dtype: int32", output.lines[1])
      self.assertEqual("  shape: ()", output.lines[2])
      self.assertEqual("", output.lines[3])
      self.assertTrue(output.lines[4].startswith("array(%d" % i))
      self.assertTrue(output.lines[4].endswith(")"))
  def testMultipleDumpsPrintTensorInvalidNumber(self):
    # An out-of-range dump number yields an error line.
    output = self._registry.dispatch_command("pt",
                                             ["while/Identity:0", "-n", "10"])
    self.assertEqual([
        "ERROR: Specified number (10) exceeds the number of available dumps "
        "(10) for tensor while/Identity:0"
    ], output.lines)
if __name__ == "__main__":
googletest.main() | unknown | codeparrot/codeparrot-clean | ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Natsu @xiaoxiaojx
*/
"use strict";
const { updateHashForEntryStartup } = require("./StartupHelpers");
/** @typedef {import("../ChunkGraph")} ChunkGraph */
/** @typedef {import("../Module")} Module */
/** @typedef {import("../Chunk")} Chunk */
/** @typedef {import("../Entrypoint")} Entrypoint */
/** @typedef {import("../util/Hash")} Hash */
/** @typedef {import("../Compilation").ChunkHashContext} ChunkHashContext */
/**
* Gets information about a chunk including its entries and runtime chunk
* @param {Chunk} chunk The chunk to get information for
* @param {ChunkGraph} chunkGraph The chunk graph containing the chunk
* @returns {{ entries: [Module, Entrypoint | undefined][], runtimeChunk: Chunk | null }} Object containing chunk entries and runtime chunk
*/
function getChunkInfo(chunk, chunkGraph) {
	const entries = Array.from(
		chunkGraph.getChunkEntryModulesWithChunkGroupIterable(chunk)
	);
	let runtimeChunk = null;
	if (entries.length > 0) {
		// The first entry's chunk group determines the runtime chunk.
		const firstEntrypoint = /** @type {Entrypoint} */ (entries[0][1]);
		runtimeChunk = firstEntrypoint.getRuntimeChunk();
	}
	return { entries, runtimeChunk };
}
/**
* Creates a chunk hash handler
* @param {string} name The name of the chunk
* @returns {(chunk: Chunk, hash: Hash, { chunkGraph }: ChunkHashContext) => void} The chunk hash handler
*/
function createChunkHashHandler(name) {
	/**
	 * @param {Chunk} chunk The chunk to get information for
	 * @param {Hash} hash The hash to update
	 * @param {ChunkHashContext} chunkHashContext The chunk hash context
	 * @returns {void}
	 */
	const handler = (chunk, hash, { chunkGraph }) => {
		// Runtime chunks are hashed elsewhere; nothing to do here.
		if (chunk.hasRuntime()) return;
		hash.update(name);
		hash.update("1");
		const { entries, runtimeChunk } = getChunkInfo(chunk, chunkGraph);
		if (runtimeChunk && runtimeChunk.hash) {
			// https://github.com/webpack/webpack/issues/19439
			// Any change to runtimeChunk should trigger a hash update,
			// we shouldn't depend on or inspect its internal implementation.
			// import __webpack_require__ from "./runtime-main.e9400aee33633a3973bd.js";
			hash.update(runtimeChunk.hash);
		}
		updateHashForEntryStartup(hash, chunkGraph, entries, chunk);
	};
	return handler;
}
// Public API of this chunk-format helper module.
module.exports = {
	createChunkHashHandler,
	getChunkInfo
}; | javascript | github | https://github.com/webpack/webpack | lib/javascript/ChunkFormatHelpers.js |
from datetime import datetime
import pyotp
from flask import flash, redirect, request, render_template, session, url_for
from flask_login import current_user, login_required
from app.constants import event_type
from app.lib.db_utils import create_object, update_object
from app.lib.fernet_utils import decrypt_string, encrypt_string
from app.mfa import mfa
from app.mfa.forms import RegisterMFAForm, VerifyMFAForm
from app.models import Events, MFA
@mfa.route('/', methods=['GET', 'POST'])
@login_required
def register():
    """Register a new MFA (TOTP) device for the current user.

    On a valid POST, persist the device with its encrypted secret, record an
    audit event, and redirect to verification. On GET -- or on a POST that
    fails validation -- (re)generate a secret and render the registration
    page with its provisioning QR URI.
    """
    form = RegisterMFAForm()
    if request.method == 'POST' and form.validate_on_submit():
        device_name = form.device_name.data
        secret = form.mfa_secret.data
        create_object(
            MFA(
                user_guid=current_user.guid,
                secret=encrypt_string(secret),
                device_name=device_name,
                is_valid=True,
            )
        )
        create_object(
            Events(
                request_id=None,
                user_guid=current_user.guid,
                type_=event_type.MFA_DEVICE_ADDED,
                timestamp=datetime.utcnow(),
                new_value={'device_name': device_name, 'is_valid': True},
            )
        )
        return redirect(url_for('mfa.verify'))
    # Fix: a POST that failed validation previously fell through to
    # render_template with mfa_secret/qr_uri undefined (UnboundLocalError).
    # Always generate a fresh secret before rendering the page.
    mfa_secret = pyotp.random_base32()
    qr_uri = pyotp.totp.TOTP(mfa_secret).provisioning_uri(
        name=current_user.email, issuer_name='OpenRecords')
    form.mfa_secret.data = mfa_secret
    return render_template('mfa/register.html',
                           form=form,
                           mfa_secret=mfa_secret,
                           qr_uri=qr_uri)
@mfa.route('/verify', methods=['GET', 'POST'])
@login_required
def verify():
    """Verify a TOTP code against one of the current user's MFA devices."""
    form = VerifyMFAForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            # Local renamed from ``mfa`` so it does not shadow the imported
            # blueprint of the same name.
            device = MFA.query.filter_by(user_guid=current_user.guid,
                                         device_name=form.device.data,
                                         is_valid=True).one_or_none()
            # Fix: one_or_none() may return None (e.g. a stale or tampered
            # device name); previously this raised AttributeError on
            # ``mfa.secret``. Treat it the same as a bad code.
            if device is not None:
                secret_str = decrypt_string(device.secret)
                if pyotp.TOTP(secret_str).verify(form.code.data):
                    session['mfa_verified'] = True
                    return redirect(url_for('main.index', fresh_login=True))
        flash("Invalid code. Please try again.", category='danger')
        form.code.data = ''
        return render_template('mfa/verify.html',
                               form=form)
    else:
        # GET: if the user has no active device yet, send them to register.
        device = MFA.query.filter_by(user_guid=current_user.guid,
                                     is_valid=True).first()
        if device is None:
            return redirect(url_for('mfa.register'))
        return render_template('mfa/verify.html',
                               form=form)
@mfa.route('/manage', methods=['GET'])
@login_required
def manage():
    """Show the current user's active MFA devices."""
    active_devices = MFA.query.filter_by(
        user_guid=current_user.guid, is_valid=True).all()
    return render_template('mfa/manage.html', mfas=active_devices)
@mfa.route('/remove', methods=['POST'])
@login_required
def remove():
    """Invalidate one of the current user's MFA devices and log an event."""
    device_name = request.form.get('device-name')
    # Local renamed from ``mfa`` so it does not shadow the imported
    # blueprint of the same name.
    device = MFA.query.filter_by(user_guid=current_user.guid,
                                 device_name=device_name,
                                 is_valid=True).one_or_none()
    if device is not None:
        # Soft-delete: flip is_valid instead of deleting the row.
        update_object(
            {'is_valid': False},
            MFA,
            device.id
        )
        create_object(
            Events(
                request_id=None,
                user_guid=current_user.guid,
                type_=event_type.MFA_DEVICE_REMOVED,
                timestamp=datetime.utcnow(),
                previous_value={'device_name': device_name, 'is_valid': True},
                new_value={'device_name': device_name, 'is_valid': False},
            )
        )
        flash('The device was removed.', category='success')
    else:
        flash('Something went wrong. Please try again.', category='danger')
return redirect(url_for('mfa.manage')) | unknown | codeparrot/codeparrot-clean | ||
"""Test deCONZ component setup process."""
import asyncio
from copy import deepcopy
from homeassistant.components.deconz import (
DeconzGateway,
async_setup_entry,
async_unload_entry,
)
from homeassistant.components.deconz.const import DOMAIN as DECONZ_DOMAIN
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
ENTRY1_HOST = "1.2.3.4"
ENTRY1_PORT = 80
ENTRY1_API_KEY = "1234567890ABCDEF"
ENTRY1_BRIDGEID = "12345ABC"
ENTRY1_UUID = "456DEF"
ENTRY2_HOST = "2.3.4.5"
ENTRY2_PORT = 80
ENTRY2_API_KEY = "1234567890ABCDEF"
ENTRY2_BRIDGEID = "23456DEF"
ENTRY2_UUID = "789ACE"
async def setup_entry(hass, entry):
    """Test that setup entry works."""
    # Patch out gateway setup and device-registry update so the config entry
    # can be set up without a reachable deCONZ bridge.
    with patch.object(DeconzGateway, "async_setup", return_value=True), patch.object(
        DeconzGateway, "async_update_device_registry", return_value=True
    ):
        assert await async_setup_entry(hass, entry) is True
async def test_setup_entry_fails(hass):
    """Test setup entry fails if deCONZ is not available."""
    # An exception from session initialization must leave no gateway stored.
    with patch("pydeconz.DeconzSession.initialize", side_effect=Exception):
        await setup_deconz_integration(hass)
    assert not hass.data[DECONZ_DOMAIN]
async def test_setup_entry_no_available_bridge(hass):
    """Test setup entry fails if deCONZ is not available."""
    # A timeout during session initialization must leave no gateway stored.
    with patch("pydeconz.DeconzSession.initialize", side_effect=asyncio.TimeoutError):
        await setup_deconz_integration(hass)
    assert not hass.data[DECONZ_DOMAIN]
async def test_setup_entry_successful(hass):
    """Test setup entry is successful."""
    config_entry = await setup_deconz_integration(hass)
    gateway = get_gateway_from_config_entry(hass, config_entry)
    # The gateway is stored under its bridge id and, being the only one,
    # is flagged as master.
    assert hass.data[DECONZ_DOMAIN]
    assert gateway.bridgeid in hass.data[DECONZ_DOMAIN]
    assert hass.data[DECONZ_DOMAIN][gateway.bridgeid].master
async def test_setup_entry_multiple_gateways(hass):
    """Test setup entry is successful with multiple gateways."""
    config_entry = await setup_deconz_integration(hass)
    gateway = get_gateway_from_config_entry(hass, config_entry)
    # Give the second integration a distinct bridge id so it is stored
    # separately.
    data = deepcopy(DECONZ_WEB_REQUEST)
    data["config"]["bridgeid"] = "01234E56789B"
    config_entry2 = await setup_deconz_integration(
        hass, get_state_response=data, entry_id="2"
    )
    gateway2 = get_gateway_from_config_entry(hass, config_entry2)
    # Only the first gateway keeps the master flag.
    assert len(hass.data[DECONZ_DOMAIN]) == 2
    assert hass.data[DECONZ_DOMAIN][gateway.bridgeid].master
    assert not hass.data[DECONZ_DOMAIN][gateway2.bridgeid].master
async def test_unload_entry(hass):
    """Test being able to unload an entry."""
    config_entry = await setup_deconz_integration(hass)
    assert hass.data[DECONZ_DOMAIN]
    # Unloading the only entry empties the domain data.
    assert await async_unload_entry(hass, config_entry)
    assert not hass.data[DECONZ_DOMAIN]
async def test_unload_entry_multiple_gateways(hass):
    """Test being able to unload an entry and master gateway gets moved."""
    config_entry = await setup_deconz_integration(hass)
    # Second integration with a distinct bridge id.
    data = deepcopy(DECONZ_WEB_REQUEST)
    data["config"]["bridgeid"] = "01234E56789B"
    config_entry2 = await setup_deconz_integration(
        hass, get_state_response=data, entry_id="2"
    )
    gateway2 = get_gateway_from_config_entry(hass, config_entry2)
    assert len(hass.data[DECONZ_DOMAIN]) == 2
    # Unloading the master entry leaves the remaining gateway behind.
    assert await async_unload_entry(hass, config_entry)
    assert len(hass.data[DECONZ_DOMAIN]) == 1
assert hass.data[DECONZ_DOMAIN][gateway2.bridgeid].master | unknown | codeparrot/codeparrot-clean | ||
# buildReactiveScopeTerminalsHIR
## File
`src/HIR/BuildReactiveScopeTerminalsHIR.ts`
## Purpose
This pass transforms the HIR by inserting `ReactiveScopeTerminal` nodes to explicitly demarcate the boundaries of reactive scopes within the control flow graph. It converts the implicit scope ranges (stored on identifiers as `identifier.scope.range`) into explicit control flow structure by:
1. Inserting a `scope` terminal at the **start** of each reactive scope
2. Inserting a `goto` terminal at the **end** of each reactive scope
3. Creating fallthrough blocks to properly connect the scopes to the rest of the CFG
This transformation makes scope boundaries first-class elements in the CFG, which is essential for later passes that generate the memoization code (the `if ($[n] !== dep)` checks).
## Input Invariants
- **Properly nested scopes and blocks**: The pass assumes `assertValidBlockNesting` has passed, meaning all program blocks and reactive scopes form a proper tree hierarchy
- **Aligned scope ranges**: Reactive scope ranges have been correctly aligned and merged by previous passes
- **Valid instruction IDs**: All instructions have sequential IDs that define the scope boundaries
- **Scopes attached to identifiers**: Reactive scopes are found by traversing all `Place` operands and collecting unique non-empty scopes
## Output Guarantees
- **Explicit scope terminals**: Each reactive scope is represented in the CFG as a `ReactiveScopeTerminal` with:
- `block` - The BlockId containing the scope's instructions
- `fallthrough` - The BlockId that executes after the scope
- **Proper block structure**: Original blocks are split at scope boundaries
- **Restored HIR invariants**: The pass restores RPO ordering, predecessor sets, instruction IDs, and scope/identifier ranges
- **Updated phi nodes**: Phi operands are repointed when their source blocks are split
## Algorithm
### Step 1: Collect Scope Rewrites
```
for each reactive scope (in range pre-order):
push StartScope rewrite at scope.range.start
push EndScope rewrite at scope.range.end
```
The `recursivelyTraverseItems` helper traverses scopes in pre-order (outer scopes before inner scopes).
### Step 2: Apply Rewrites by Splitting Blocks
```
reverse queuedRewrites (to pop in ascending instruction order)
for each block:
for each instruction (or terminal):
while there are rewrites <= current instruction ID:
split block at current index
insert scope terminal (for start) or goto terminal (for end)
emit final block segment with original terminal
```
### Step 3: Repoint Phi Nodes
When a block is split, its final segment gets a new BlockId. Phi operands that referenced the original block are updated to reference the new final block.
### Step 4: Restore HIR Invariants
- Recompute RPO (reverse post-order) block traversal
- Recalculate predecessor sets
- Renumber instruction IDs
- Fix scope and identifier ranges to match new instruction IDs
## Key Data Structures
### TerminalRewriteInfo
```typescript
type TerminalRewriteInfo =
| {
kind: 'StartScope';
blockId: BlockId; // New block for scope content
fallthroughId: BlockId; // Block after scope ends
instrId: InstructionId; // Where to insert
scope: ReactiveScope; // The scope being created
}
| {
kind: 'EndScope';
instrId: InstructionId; // Where to insert
fallthroughId: BlockId; // Same as corresponding StartScope
};
```
### RewriteContext
```typescript
type RewriteContext = {
source: BasicBlock; // Original block being split
instrSliceIdx: number; // Current slice start index
nextPreds: Set<BlockId>; // Predecessors for next emitted block
nextBlockId: BlockId; // BlockId for next emitted block
rewrites: Array<BasicBlock>; // Accumulated split blocks
};
```
### ScopeTraversalContext
```typescript
type ScopeTraversalContext = {
fallthroughs: Map<ScopeId, BlockId>; // Cache: scope -> its fallthrough block
rewrites: Array<TerminalRewriteInfo>;
env: Environment;
};
```
## Edge Cases
### Multiple Rewrites at Same Instruction ID
The while loop in Step 2 handles multiple scope starts/ends at the same instruction ID.
### Nested Scopes
The pre-order traversal ensures outer scopes are processed before inner scopes, creating proper nesting in the CFG.
### Empty Blocks After Split
When a scope boundary falls at the start of a block, the split may create a block with no instructions (only a terminal).
### Control Flow Within Scopes
The pass preserves existing control flow (if/else, loops) within scopes; it only adds scope entry/exit points.
### Early Returns
When a return occurs within a scope, the scope terminal still has a fallthrough block, but that block may contain `Unreachable` terminal.
## TODOs
Line 283-284:
```typescript
// TODO make consistent instruction IDs instead of reusing
```
## Example
### Fixture: `reactive-scopes-if.js`
**Before BuildReactiveScopeTerminalsHIR:**
```
bb0 (block):
[1] $29_@0[1:22] = Array [] // x with scope @0 range [1:22]
[2] StoreLocal x$30_@0 = $29_@0
[3] $32 = LoadLocal a$26
[4] If ($32) then:bb2 else:bb3 fallthrough=bb1
bb2:
[5] $33_@1[5:11] = Array [] // y with scope @1 range [5:11]
...
```
**After BuildReactiveScopeTerminalsHIR:**
```
bb0 (block):
[1] Scope @0 [1:28] block=bb9 fallthrough=bb10 // <-- scope terminal inserted
bb9:
[2] $29_@0 = Array []
[3] StoreLocal x$30_@0 = $29_@0
[4] $32 = LoadLocal a$26
[5] If ($32) then:bb2 else:bb3 fallthrough=bb1
bb2:
[6] Scope @1 [6:14] block=bb11 fallthrough=bb12 // <-- nested scope terminal
bb11:
[7] $33_@1 = Array []
...
[13] Goto bb12 // <-- scope end goto
bb12:
...
bb1:
[27] Goto bb10 // <-- scope @0 end goto
bb10:
[28] $50 = LoadLocal x$30_@0
[29] Return $50
```
The key transformation is that scope boundaries become explicit control flow: a `Scope` terminal enters the scope content block, and a `Goto` terminal exits to the fallthrough block. This structure is later used to generate the memoization checks. | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/docs/passes/17-buildReactiveScopeTerminalsHIR.md |
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Class ANY (generic) rdata type classes."""
__all__ = [
'AFSDB',
'CERT',
'CNAME',
'DLV',
'DNAME',
'DNSKEY',
'DS',
'GPOS',
'HINFO',
'HIP',
'ISDN',
'KEY',
'LOC',
'MX',
'NS',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'NXT',
'PTR',
'RP',
'RRSIG',
'RT',
'SIG',
'SOA',
'SPF',
'SSHFP',
'TXT',
'X25',
] | unknown | codeparrot/codeparrot-clean | ||
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from airflow.providers.google.cloud.example_dags.example_dataproc import BUCKET, PYSPARK_MAIN, SPARKR_MAIN
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_DATAPROC_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
# GCS destination for the job files staged by this system test.
GCS_URI = f"gs://{BUCKET}"

# Minimal PySpark job uploaded as the main file of the example DAG's
# PySpark task.
pyspark_file = """
#!/usr/bin/python
import pyspark
sc = pyspark.SparkContext()
rdd = sc.parallelize(['Hello,', 'world!'])
words = sorted(rdd.collect())
print(words)
"""

# Minimal SparkR job uploaded as the main file of the example DAG's
# SparkR task.
sparkr_file = """
#!/usr/bin/r
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = "/home/spark")
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session()
# Create the SparkDataFrame
df <- as.DataFrame(faithful)
head(summarize(groupBy(df, df$waiting), count = n(df$waiting)))
"""
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_DATAPROC_KEY)
class DataprocExampleDagsTest(GoogleSystemTest):
    """System test that runs the Dataproc example DAG end to end.

    Stages the PySpark/SparkR job files in a GCS bucket before the run
    and deletes the bucket afterwards.
    """

    @provide_gcp_context(GCP_DATAPROC_KEY)
    def setUp(self):
        super().setUp()
        # Stage the job files the example DAG expects to find in GCS.
        self.create_gcs_bucket(BUCKET)
        self.upload_content_to_gcs(lines=pyspark_file, bucket=GCS_URI, filename=PYSPARK_MAIN)
        self.upload_content_to_gcs(lines=sparkr_file, bucket=GCS_URI, filename=SPARKR_MAIN)

    @provide_gcp_context(GCP_DATAPROC_KEY)
    def tearDown(self):
        # Clean up the staged files and the bucket itself.
        self.delete_gcs_bucket(BUCKET)
        super().tearDown()

    @provide_gcp_context(GCP_DATAPROC_KEY)
    def test_run_example_dag(self):
        self.run_dag(dag_id="example_gcp_dataproc", dag_folder=CLOUD_DAG_FOLDER)
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
extract_attributes,
try_get,
urlencode_postdata,
ExtractorError,
)
class TVPlayerIE(InfoExtractor):
    """Extractor for live channel streams on tvplayer.com."""

    _VALID_URL = r'https?://(?:www\.)?tvplayer\.com/watch/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://tvplayer.com/watch/bbcone',
        'info_dict': {
            'id': '89',
            'ext': 'mp4',
            'title': r're:^BBC One [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The channel's numeric id and display name are carried as data-*
        # attributes on the "current-channel" element of the watch page.
        current_channel = extract_attributes(self._search_regex(
            r'(<div[^>]+class="[^"]*current-channel[^"]*"[^>]*>)',
            webpage, 'channel element'))
        title = current_channel['data-name']
        resource_id = current_channel['data-id']

        # Page-embedded token, exchanged below for the 'validate' value the
        # stream API requires.
        token = self._search_regex(
            r'data-token=(["\'])(?P<token>(?!\1).+)\1', webpage,
            'token', group='token')

        context = self._download_json(
            'https://tvplayer.com/watch/context', display_id,
            'Downloading JSON context', query={
                'resource': resource_id,
                'gen': token,
            })

        validate = context['validate']
        platform = try_get(
            context, lambda x: x['platform']['key'], compat_str) or 'firefox'

        try:
            response = self._download_json(
                'http://api.tvplayer.com/api/v2/stream/live',
                display_id, 'Downloading JSON stream', headers={
                    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                }, data=urlencode_postdata({
                    'id': resource_id,
                    'service': 1,
                    'platform': platform,
                    'validate': validate,
                }))['tvplayer']['response']
        except ExtractorError as e:
            # The API signals failures via HTTP error responses whose body
            # still contains a JSON payload; surface its error message.
            if isinstance(e.cause, compat_HTTPError):
                response = self._parse_json(
                    e.cause.read().decode(), resource_id)['tvplayer']['response']
                raise ExtractorError(
                    '%s said: %s' % (self.IE_NAME, response['error']), expected=True)
            raise

        formats = self._extract_m3u8_formats(response['stream'], display_id, 'mp4')
        self._sort_formats(formats)

        return {
            'id': resource_id,
            'display_id': display_id,
            'title': self._live_title(title),
            'formats': formats,
            'is_live': True,
        }
def string_length(str1):
    """Return the number of characters in *str1*.

    Uses the built-in len() (O(1) for strings) instead of counting the
    characters one by one in a Python-level loop.
    """
    return len(str1)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import zmq
if __name__ == '__main__':
    # Minimal ZeroMQ echo server: reply to every request with its payload.
    ctx = zmq.Context()
    s = ctx.socket(zmq.REP)
    s.bind('tcp://*:5555')
    # Single-argument print() form works under both Python 2 and 3;
    # the original message also misspelled "server".
    print('reply server init success ...')
    try:
        while True:
            try:
                msg = s.recv()
                s.send(msg)
            except KeyboardInterrupt:
                break
    finally:
        # Release the socket and context even if recv/send raises an
        # unexpected exception (the original only closed on clean exit).
        s.close()
        ctx.term()
import beanstalkc
import yaml
import logging
import pprint
import sys
from collections import OrderedDict
from .config import config
from . import report
log = logging.getLogger(__name__)
def connect():
    """Open a connection to the beanstalkd server named in the teuthology
    configuration. Raises RuntimeError when host/port are not configured."""
    queue_host = config.queue_host
    queue_port = config.queue_port
    if queue_host is None or queue_port is None:
        raise RuntimeError(
            'Beanstalk queue information not found in {conf_path}'.format(
                conf_path=config.teuthology_yaml))
    return beanstalkc.Connection(host=queue_host, port=queue_port)
def watch_tube(connection, tube_name):
    """
    Begin watching *tube_name* on *connection*; any comma-separated name is
    corrected to the shared 'multi' tube. Returns the tube name actually
    watched.
    """
    watched = tube_name
    if ',' in watched:
        log.debug("Correcting tube name to 'multi'")
        watched = 'multi'
    connection.watch(watched)
    connection.ignore('default')
    return watched
def walk_jobs(connection, tube_name, processor, pattern=None):
    """
    Reserve every ready job on *tube_name* and feed it to *processor*.

    When *pattern* is given, only jobs whose name contains it are passed
    to processor.add_job().
    """
    log.info("Checking Beanstalk Queue...")
    job_count = connection.stats_tube(tube_name)['current-jobs-ready']
    if job_count == 0:
        log.info('No jobs in Beanstalk Queue')
        return

    # Scale the reserve timeout with the queue depth so that large queues
    # get proportionally more time.
    timeout = job_count / 2000.0 * 60
    for index in range(1, job_count + 1):
        print_progress(index, job_count, "Loading")
        job = connection.reserve(timeout=timeout)
        if job is None or job.body is None:
            continue
        job_config = yaml.safe_load(job.body)
        job_name = job_config['name']
        job_id = job.stats()['id']
        if pattern is None or pattern in job_name:
            processor.add_job(job_id, job_config, job)
    end_progress()
    processor.complete()
def print_progress(index, total, message=None):
    """Render an in-place '<message> <index>/<total>' progress line on stderr."""
    stream = sys.stderr
    prefix = "{m} ".format(m=message) if message else ''
    stream.write("{msg}{i}/{total}\r".format(msg=prefix, i=index, total=total))
    stream.flush()
def end_progress():
    """Terminate the progress line started by print_progress()."""
    stream = sys.stderr
    stream.write('\n')
    stream.flush()
class JobProcessor(object):
    """Accumulate queued jobs and invoke a per-job hook for each.

    Subclasses override process_job() (called once for every added job)
    and complete() (called after the whole queue has been walked).
    """

    def __init__(self):
        # Insertion-ordered so each job's index reflects queue order.
        self.jobs = OrderedDict()

    def add_job(self, job_id, job_config, job_obj=None):
        key = str(job_id)
        entry = {
            'index': len(self.jobs) + 1,
            'job_config': job_config,
        }
        if job_obj:
            entry['job_obj'] = job_obj
        self.jobs[key] = entry
        self.process_job(key)

    def process_job(self, job_id):
        """Hook: handle a single job. Default implementation is a no-op."""
        pass

    def complete(self):
        """Hook: run after all jobs are processed. Default is a no-op."""
        pass
class JobPrinter(JobProcessor):
    """Print a one-line summary of each job as it is processed.

    :param show_desc: also print the job description, one word per line
    :param full:      pretty-print the entire job configuration instead
    """

    def __init__(self, show_desc=False, full=False):
        super(JobPrinter, self).__init__()
        self.show_desc = show_desc
        self.full = full

    def process_job(self, job_id):
        job_config = self.jobs[job_id]['job_config']
        job_index = self.jobs[job_id]['index']
        job_name = job_config['name']
        job_desc = job_config['description']
        # Single-argument print() form is valid under both Python 2
        # (parenthesized expression statement) and Python 3.
        print('Job: {i:>4} {job_name}/{job_id}'.format(
            i=job_index,
            job_id=job_id,
            job_name=job_name,
        ))
        if self.full:
            pprint.pprint(job_config)
        elif job_desc and self.show_desc:
            for desc in job_desc.split():
                print('\t {desc}'.format(desc=desc))
class RunPrinter(JobProcessor):
    """Print the name of each distinct run exactly once."""

    def __init__(self):
        super(RunPrinter, self).__init__()
        self.runs = list()

    def process_job(self, job_id):
        run = self.jobs[job_id]['job_config']['name']
        if run not in self.runs:
            self.runs.append(run)
            # print() form is compatible with both Python 2 and 3.
            print(run)
class JobDeleter(JobProcessor):
    """Delete every queued job whose name contains *pattern*."""

    def __init__(self, pattern):
        self.pattern = pattern
        super(JobDeleter, self).__init__()

    def add_job(self, job_id, job_config, job_obj=None):
        # Filter before queuing so process_job() only ever sees matches.
        job_name = job_config['name']
        if self.pattern in job_name:
            super(JobDeleter, self).add_job(job_id, job_config, job_obj)

    def process_job(self, job_id):
        job_config = self.jobs[job_id]['job_config']
        job_name = job_config['name']
        # print() form is compatible with both Python 2 and 3.
        print('Deleting {job_name}/{job_id}'.format(
            job_id=job_id,
            job_name=job_name,
        ))
        job_obj = self.jobs[job_id].get('job_obj')
        if job_obj:
            job_obj.delete()
        # Also remove the job's record from the results server.
        report.try_delete_jobs(job_name, job_id)
def main(args):
    """Entry point: list, print, or delete queued jobs for a machine type.

    *args* is the docopt-style dict of command-line options.
    """
    machine_type = args['--machine_type']
    delete = args['--delete']
    runs = args['--runs']
    show_desc = args['--description']
    full = args['--full']
    # Pre-bind so the finally clause below cannot raise NameError when
    # connect() itself fails (the original referenced an unbound name
    # there, masking the real exception).
    connection = None
    try:
        connection = connect()
        watch_tube(connection, machine_type)
        if delete:
            walk_jobs(connection, machine_type,
                      JobDeleter(delete))
        elif runs:
            walk_jobs(connection, machine_type,
                      RunPrinter())
        else:
            walk_jobs(connection, machine_type,
                      JobPrinter(show_desc=show_desc, full=full))
    except KeyboardInterrupt:
        log.info("Interrupted.")
    finally:
        if connection is not None:
            connection.close()
import itertools
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import int_to_base36, urlsafe_base64_decode, urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.importlib import import_module
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings, patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
# Pin language, template, timezone and hasher settings so the auth-view
# tests behave identically regardless of the project settings in use.
@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """
    # Provides the 'testclient' / 'staffmember@example.com' users used below.
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, password='password'):
        """Log the fixture user in and confirm a session key was set."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertTrue(SESSION_KEY in self.client.session)
        return response

    def logout(self):
        """Log out via the admin logout view and confirm the session is gone."""
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(SESSION_KEY not in self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.

        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            # Only compare components present in both URLs.
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Sanity checks for the named URL patterns shipped in contrib.auth.urls."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        # (name, args, kwargs) triples covering every auth URL pattern.
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    """End-to-end tests for the password-reset flow (request email, confirm
    link, set new password, redirects)."""

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Returns (full URL, path) of the reset link found in the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_valid_base36(self):
        # Remove in Django 1.7
        url, path = self._test_confirm_start()
        path_parts = path.strip("/").split("/")
        # construct an old style (base36) URL by converting the base64 ID
        path_parts[1] = int_to_base36(int(urlsafe_base64_decode(path_parts[1])))
        response = self.client.get("/%s/%s-%s/" % tuple(path_parts))
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/123456-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """Password-reset confirm flow when a custom user model is active."""
    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Returns (full URL, path) of the reset link found in the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password_change view: validation, success and redirects."""

    def fail_login(self, password='password'):
        """Attempt a login expected to fail and assert the form error."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Old password must no longer work; the new one must.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # Anonymous access to the done page redirects to login.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: context, redirect safety and CSRF rotation."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Without the sites framework a RequestSite stand-in is used.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                        'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        response = self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["CSRF_COOKIE_USED"] = True
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value

        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        req.REQUEST = req.POST

        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value

        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        # Hitting a login_required view anonymously must redirect to LOGIN_URL.
        resp = self.client.get('/login_required/')
        self.assertEqual(resp.status_code, 302)
        self.assertURLEqual(resp.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        expected = 'http://remote.example.com/login?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        expected = 'https:///login/?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        expected = 'http://remote.example.com/login/?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""
    def assertLoginRedirectURLEqual(self, url):
        # A successful login must redirect (302) to LOGIN_REDIRECT_URL.
        response = self.login()
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url)
    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')
    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')
    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        # LOGIN_REDIRECT_URL may be a URL pattern name; it is resolved.
        self.assertLoginRedirectURLEqual('/password_reset/')
    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: templates, redirect targets, and the
    protection of the ?next= redirect against unsafe URLs."""
    def confirm_logged_out(self):
        # After logout the session must no longer carry a user id.
        self.assertTrue(SESSION_KEY not in self.client.session)
    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()
    def test_14377(self):
        # Bug 14377: the logout template context must include the site.
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)
    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223: a ?next= query argument overrides the view's next_page.
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()
    def test_security_check(self, password='password'):
        # The ?next= value must never send the browser to another host or
        # scheme (open-redirect / XSS protection).
        logout_url = reverse('logout')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'): # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()
    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session['django_language'] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.client.get('/logout/')
        self.assertEqual(self.client.session['django_language'], 'pl')
@skipIfCustomUser
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
    # Exercises the admin changelist/changeform for users against a
    # dedicated admin URLconf.
    urls = 'django.contrib.auth.tests.urls_admin'
    def setUp(self):
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        self.admin = User.objects.get(pk=1)
    def get_user_data(self, user):
        # Full POST payload for the admin user change form, including the
        # initial-* values the form uses to detect which fields changed.
        return {
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'is_active': user.is_active,
            'is_staff': user.is_staff,
            'is_superuser': user.is_superuser,
            'last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'last_login_1': user.last_login.strftime('%H:%M:%S'),
            'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
            'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'first_name': user.first_name,
            'last_name': user.last_name,
        }
    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(len(logger_calls), 1)
    def test_user_change_email(self):
        data = self.get_user_data(self.admin)
        data['email'] = 'new_' + data['email']
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
        self.assertRedirects(response, '/admin/auth/user/')
        # The change is recorded in the admin LogEntry audit trail.
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed email.')
    def test_user_not_change(self):
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
            self.get_user_data(self.admin)
        )
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1') | unknown | codeparrot/codeparrot-clean | ||
"""Support for tracking MQTT enabled devices."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import CONF_DEVICES
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import CONF_QOS
_LOGGER = logging.getLogger(__name__)
# Platform config: CONF_DEVICES maps a device id to the MQTT topic whose
# payloads report that device's location.  mqtt.SCHEMA_BASE is merged in —
# presumably it supplies the CONF_QOS option read during setup (confirm).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(mqtt.SCHEMA_BASE).extend({
    vol.Required(CONF_DEVICES): {cv.string: mqtt.valid_subscribe_topic},
})
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Set up the MQTT tracker.

    Subscribes to one topic per configured device; each received payload is
    reported as that device's location name.
    """
    qos = config[CONF_QOS]

    def make_handler(dev_id):
        """Build a message callback bound to a single device id."""

        @callback
        def async_message_received(msg):
            """Handle received MQTT message."""
            hass.async_create_task(
                async_see(dev_id=dev_id, location_name=msg.payload))

        return async_message_received

    for dev_id, topic in config[CONF_DEVICES].items():
        await mqtt.async_subscribe(hass, topic, make_handler(dev_id), qos)

    return True
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# AWQ
[Activation-aware Weight Quantization (AWQ)](https://hf.co/papers/2306.00978) preserves a small fraction of the weights that are important for LLM performance to compress a model to 4-bits with minimal performance degradation.
There are several libraries for quantizing models with the AWQ algorithm, such as [llm-awq](https://github.com/mit-han-lab/llm-awq), [autoawq](https://github.com/casper-hansen/AutoAWQ) or [optimum-intel](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc). Transformers supports loading models quantized with the llm-awq and autoawq libraries. This guide will show you how to load models quantized with autoawq, but the process is similar for llm-awq quantized models.
Run the command below to install autoawq:
```bash
pip install autoawq
```
> [!WARNING]
> AutoAWQ downgrades Transformers to version 4.47.1. If you want to do inference with AutoAWQ, you may need to reinstall your Transformers' version after installing AutoAWQ.
Identify an AWQ-quantized model by checking the `quant_method` key in the model's [config.json](https://huggingface.co/TheBloke/zephyr-7B-alpha-AWQ/blob/main/config.json) file.
```json
{
"_name_or_path": "/workspace/process/huggingfaceh4_zephyr-7b-alpha/source",
"architectures": [
"MistralForCausalLM"
],
...
...
...
"quantization_config": {
"quant_method": "awq",
"zero_point": true,
"group_size": 128,
"bits": 4,
"version": "gemm"
}
}
```
Load the AWQ-quantized model with [`~PreTrainedModel.from_pretrained`]. This automatically sets the other weights to fp16 by default for performance reasons. Use the `dtype` parameter to load these other weights in a different format.
If the model is loaded on the CPU, use the `device_map` parameter to move it to an accelerator.
```py
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import Accelerator
import torch
device = Accelerator().device
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/zephyr-7B-alpha-AWQ",
dtype=torch.float32,
device_map=device
)
```
Use `attn_implementation` to enable [FlashAttention2](../perf_infer_gpu_one#flashattention-2) to further accelerate inference.
```py
from transformers import AutoModelForCausalLM, AutoTokenizer
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/zephyr-7B-alpha-AWQ",
attn_implementation="flash_attention_2",
device_map="cuda:0"
)
```
## Fused modules
Fused modules offer improved accuracy and performance. They are supported out-of-the-box for AWQ modules for [Llama](https://huggingface.co/meta-llama) and [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) architectures, but you can also fuse AWQ modules for unsupported architectures.
> [!WARNING]
> Fused modules cannot be combined with other optimization techniques such as FlashAttention2.
<hfoptions id="fuse">
<hfoption id="supported architectures">
Create an [`AwqConfig`] and set the parameters `fuse_max_seq_len` and `do_fuse=True` to enable fused modules. The `fuse_max_seq_len` parameter is the total sequence length and it should include the context length and the expected generation length. Set it to a larger value to be safe.
The example below fuses the AWQ modules of the [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) model.
```python
import torch
from transformers import AwqConfig, AutoModelForCausalLM
quantization_config = AwqConfig(
bits=4,
fuse_max_seq_len=512,
do_fuse=True,
)
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/Mistral-7B-OpenOrca-AWQ",
quantization_config=quantization_config
).to(0)
```
The [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) model was benchmarked with `batch_size=1` with and without fused modules.
<figcaption class="text-center text-gray-500 text-lg">Unfused module</figcaption>
| Batch Size | Prefill Length | Decode Length | Prefill tokens/s | Decode tokens/s | Memory (VRAM) |
|-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------|
| 1 | 32 | 32 | 60.0984 | 38.4537 | 4.50 GB (5.68%) |
| 1 | 64 | 64 | 1333.67 | 31.6604 | 4.50 GB (5.68%) |
| 1 | 128 | 128 | 2434.06 | 31.6272 | 4.50 GB (5.68%) |
| 1 | 256 | 256 | 3072.26 | 38.1731 | 4.50 GB (5.68%) |
| 1 | 512 | 512 | 3184.74 | 31.6819 | 4.59 GB (5.80%) |
| 1 | 1024 | 1024 | 3148.18 | 36.8031 | 4.81 GB (6.07%) |
| 1 | 2048 | 2048 | 2927.33 | 35.2676 | 5.73 GB (7.23%) |
<figcaption class="text-center text-gray-500 text-lg">Fused module</figcaption>
| Batch Size | Prefill Length | Decode Length | Prefill tokens/s | Decode tokens/s | Memory (VRAM) |
|-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------|
| 1 | 32 | 32 | 81.4899 | 80.2569 | 4.00 GB (5.05%) |
| 1 | 64 | 64 | 1756.1 | 106.26 | 4.00 GB (5.05%) |
| 1 | 128 | 128 | 2479.32 | 105.631 | 4.00 GB (5.06%) |
| 1 | 256 | 256 | 1813.6 | 85.7485 | 4.01 GB (5.06%) |
| 1 | 512 | 512 | 2848.9 | 97.701 | 4.11 GB (5.19%) |
| 1 | 1024 | 1024 | 3044.35 | 87.7323 | 4.41 GB (5.57%) |
| 1 | 2048 | 2048 | 2715.11 | 89.4709 | 5.57 GB (7.04%) |
The speed and throughput of fused and unfused modules were also tested with the [optimum-benchmark](https://github.com/huggingface/optimum-benchmark) library.
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_forward_memory_plot.png" alt="generate throughput per batch size" />
<figcaption class="mt-2 text-center text-sm text-gray-500">forward peak memory/batch size</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_generate_throughput_plot.png" alt="forward latency per batch size" />
<figcaption class="mt-2 text-center text-sm text-gray-500">generate throughput/batch size</figcaption>
</div>
</div>
</hfoption>
<hfoption id="unsupported architectures">
For architectures that don't support fused modules, create an [`AwqConfig`] and define a custom fusing mapping in `modules_to_fuse` to determine which modules need to be fused.
The example below fuses the AWQ modules of the [TheBloke/Yi-34B-AWQ](https://huggingface.co/TheBloke/Yi-34B-AWQ) model.
```python
import torch
from transformers import AwqConfig, AutoModelForCausalLM
quantization_config = AwqConfig(
bits=4,
fuse_max_seq_len=512,
modules_to_fuse={
"attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
"layernorm": ["ln1", "ln2", "norm"],
"mlp": ["gate_proj", "up_proj", "down_proj"],
"use_alibi": False,
"num_attention_heads": 56,
"num_key_value_heads": 8,
"hidden_size": 7168
}
)
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/Yi-34B-AWQ",
quantization_config=quantization_config
).to(0)
```
The parameter `modules_to_fuse` should include the following keys.
- `"attention"`: The names of the attention layers to fuse in the following order: query, key, value and output projection layer. If you don't want to fuse these layers, pass an empty list.
- `"layernorm"`: The names of all the LayerNorm layers you want to replace with a custom fused LayerNorm. If you don't want to fuse these layers, pass an empty list.
- `"mlp"`: The names of the MLP layers you want to fuse into a single MLP layer in the order: (gate (dense, layer, post-attention) / up / down layers).
- `"use_alibi"`: If your model uses ALiBi positional embedding.
- `"num_attention_heads"`: The number of attention heads.
- `"num_key_value_heads"`: The number of key value heads that should be used to implement Grouped Query Attention (GQA).
| parameter value | attention |
|---|---|
| `num_key_value_heads=num_attention_heads` | Multi-Head Attention |
| `num_key_value_heads=1` | Multi-Query Attention |
| `num_key_value_heads=...` | Grouped Query Attention |
- `"hidden_size"`: The dimension of the hidden representations.
</hfoption>
</hfoptions>
## ExLlamaV2
[ExLlamaV2](https://github.com/turboderp/exllamav2) kernels support faster prefill and decoding. Run the command below to install the latest version of autoawq with ExLlamaV2 support.
```bash
pip install git+https://github.com/casper-hansen/AutoAWQ.git
```
Set `version="exllama"` in [`AwqConfig`] to enable ExLlamaV2 kernels.
> [!TIP]
> ExLlamaV2 is supported on AMD GPUs.
```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, AwqConfig
quantization_config = AwqConfig(version="exllama")
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/Mistral-7B-Instruct-v0.1-AWQ",
quantization_config=quantization_config,
device_map="auto",
)
```
## Resources
Run the AWQ demo [notebook](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY#scrollTo=Wwsg6nCwoThm) for more examples of how to quantize a model, push a quantized model to the Hub, and more. | unknown | github | https://github.com/huggingface/transformers | docs/source/en/quantization/awq.md |
#!/usr/bin/env python
"""
@package mi.dataset.driver.flort_dj.sio
@file mi-dataset/mi/dataset/driver/flort_dj/sio/flort_dj_sio_telemetered_driver.py
@author Joe Padula
@brief Telemetered driver for the flort_dj_sio instrument
Release notes:
Initial Release
"""
__author__ = 'jpadula'
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.flort_dj_sio import FlortDjSioParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = FlortDjSioTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class FlortDjSioTelemeteredDriver(SimpleDatasetDriver):
    """
    The flort_dj_sio telemetered driver class extends the SimpleDatasetDriver.
    All this needs to do is create a concrete _build_parser method
    """

    def _build_parser(self, stream_handle):
        """Construct a FlortDjSioParser over *stream_handle*."""
        config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE:
                'mi.dataset.parser.flort_dj_sio',
            DataSetDriverConfigKeys.PARTICLE_CLASS:
                'FlortdParserDataParticle',
        }
        return FlortDjSioParser(config, stream_handle,
                                self._exception_callback)
#!/usr/bin/env python
##########################################################################
#
# Copyright 2015 VMware, Inc.
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
import json
import optparse
import re
import difflib
import sys
def strip_object_hook(obj):
    """json object_hook that strips decoded images/blobs.

    Objects tagged with a '__class__' marker are dropped entirely (decoded
    as JSON null); any other '__dunder__' members are removed in place and
    the remaining object is returned.
    """
    if '__class__' in obj:
        return None
    # Iterate over a snapshot of the keys: deleting entries while iterating
    # a live dict view raises RuntimeError on Python 3.
    for name in list(obj.keys()):
        if name.startswith('__') and name.endswith('__'):
            del obj[name]
    return obj
class Visitor:
    """Base visitor for JSON-like trees.

    visit() dispatches on the node's type (dict / list / scalar) to one of
    the three hooks below; subclasses override the hooks they care about.
    """

    def visit(self, node, *args, **kwargs):
        if isinstance(node, dict):
            handler = self.visitObject
        elif isinstance(node, list):
            handler = self.visitArray
        else:
            handler = self.visitValue
        return handler(node, *args, **kwargs)

    def visitObject(self, node, *args, **kwargs):
        """Hook for dict nodes; default is a no-op."""
        pass

    def visitArray(self, node, *args, **kwargs):
        """Hook for list nodes; default is a no-op."""
        pass

    def visitValue(self, node, *args, **kwargs):
        """Hook for scalar nodes; default is a no-op."""
        pass
class Dumper(Visitor):
    """Pretty-print a JSON-like tree of dicts/lists/scalars to a stream.

    Object members are emitted in sorted key order so output is stable;
    scalars are serialized with json.dumps (NaN/Infinity allowed).
    """

    def __init__(self, stream=sys.stdout):
        self.stream = stream
        self.level = 0  # current indentation depth

    def _write(self, s):
        self.stream.write(s)

    def _indent(self):
        self._write(' '*self.level)

    def _newline(self):
        self._write('\n')

    def visitObject(self, node):
        self.enter_object()
        # sorted() instead of keys().sort(): on Python 3 dict.keys() is a
        # view with no sort() method, so the old form raised AttributeError.
        members = sorted(node.keys())
        last = len(members) - 1
        for i, name in enumerate(members):
            self.enter_member(name)
            self.visit(node[name])
            self.leave_member(i == last)
        self.leave_object()

    def enter_object(self):
        self._write('{')
        self._newline()
        self.level += 1

    def enter_member(self, name):
        self._indent()
        self._write('%s: ' % name)

    def leave_member(self, last):
        # Members are comma-separated; no trailing comma after the last one.
        if not last:
            self._write(',')
        self._newline()

    def leave_object(self):
        self.level -= 1
        self._indent()
        self._write('}')
        # Terminate the top-level value with a newline.
        if self.level <= 0:
            self._newline()

    def visitArray(self, node):
        self.enter_array()
        last = len(node) - 1
        for i, value in enumerate(node):
            self._indent()
            self.visit(value)
            if i != last:
                self._write(',')
            self._newline()
        self.leave_array()

    def enter_array(self):
        self._write('[')
        self._newline()
        self.level += 1

    def leave_array(self):
        self.level -= 1
        self._indent()
        self._write(']')

    def visitValue(self, node):
        self._write(json.dumps(node, allow_nan=True))
class Comparer(Visitor):
    """Structurally compare two JSON-like trees.

    Floats are compared with a relative tolerance (absolute when the
    reference is zero); NaN compares equal only to itself.  With
    ignore_added=True, members present only in the second tree are ignored.
    """

    # Types accepted on the other side of a float comparison.  Python 2 has
    # a distinct `long` type; on Python 3 the name does not exist and plain
    # int already covers arbitrary precision.
    try:
        _NUMERIC_TYPES = (int, long, float)
    except NameError:
        _NUMERIC_TYPES = (int, float)

    def __init__(self, ignore_added = False, tolerance = 2.0 ** -24):
        self.ignore_added = ignore_added
        self.tolerance = tolerance

    def visitObject(self, a, b):
        if not isinstance(b, dict):
            return False
        if not self.ignore_added:
            if len(a) != len(b):
                return False
            # sorted() works on both Python 2 key lists and Python 3 views,
            # unlike the old keys().sort() idiom.
            if sorted(a.keys()) != sorted(b.keys()):
                return False
        for k in a:
            try:
                be = b[k]
            except KeyError:
                return False
            if not self.visit(a[k], be):
                return False
        return True

    def visitArray(self, a, b):
        if not isinstance(b, list):
            return False
        if len(a) != len(b):
            return False
        for ae, be in zip(a, b):
            if not self.visit(ae, be):
                return False
        return True

    def visitValue(self, a, b):
        if isinstance(a, float) and isinstance(b, self._NUMERIC_TYPES) or \
           isinstance(b, float) and isinstance(a, self._NUMERIC_TYPES):
            if a is b:
                # NaNs take this path
                return True
            elif a == b:
                return True
            elif a == 0:
                return abs(b) < self.tolerance
            else:
                return abs((b - a)/a) < self.tolerance
        else:
            return a == b
class Differ(Visitor):
    # Writes a human-readable diff of two JSON-like trees to a stream:
    # subtrees that compare equal (per Comparer) are omitted, differing
    # leaves are shown as "old -> new", and multi-line strings get a
    # difflib line-by-line diff.
    def __init__(self, stream = sys.stdout, ignore_added = False):
        self.dumper = Dumper(stream)
        self.comparer = Comparer(ignore_added = ignore_added)
    def visit(self, a, b):
        # Fast path: equal subtrees produce no output at all.
        if self.comparer.visit(a, b):
            return
        Visitor.visit(self, a, b)
    def visitObject(self, a, b):
        if not isinstance(b, dict):
            self.replace(a, b)
        else:
            self.dumper.enter_object()
            # Walk the union of member names (only a's names when added
            # members are ignored) and recurse into those that differ.
            names = set(a.keys())
            if not self.comparer.ignore_added:
                names.update(b.keys())
            names = list(names)
            names.sort()
            for i in range(len(names)):
                name = names[i]
                ae = a.get(name, None)
                be = b.get(name, None)
                if not self.comparer.visit(ae, be):
                    self.dumper.enter_member(name)
                    self.visit(ae, be)
                    self.dumper.leave_member(i == len(names) - 1)
            self.dumper.leave_object()
    def visitArray(self, a, b):
        if not isinstance(b, list):
            self.replace(a, b)
        else:
            self.dumper.enter_array()
            # Iterate to the longer length; missing elements read as None.
            max_len = max(len(a), len(b))
            for i in range(max_len):
                try:
                    ae = a[i]
                except IndexError:
                    ae = None
                try:
                    be = b[i]
                except IndexError:
                    be = None
                self.dumper._indent()
                if self.comparer.visit(ae, be):
                    self.dumper.visit(ae)
                else:
                    self.visit(ae, be)
                if i != max_len - 1:
                    self.dumper._write(',')
                self.dumper._newline()
            self.dumper.leave_array()
    def visitValue(self, a, b):
        if a != b:
            self.replace(a, b)
    def replace(self, a, b):
        # Multi-line strings are diffed line by line; anything else is
        # printed inline as "old -> new".
        if self.isMultilineString(a) or self.isMultilineString(b):
            a = str(a)
            b = str(b)
            a = a.splitlines()
            b = b.splitlines()
            differ = difflib.Differ()
            result = differ.compare(a, b)
            self.dumper.level += 1
            for entry in result:
                self.dumper._newline()
                self.dumper._indent()
                # difflib rows start with a 2-char tag ('- ', '+ ', '  ',
                # '? '); the '? ' intraline-hint row is kept unquoted.
                tag = entry[:2]
                text = entry[2:]
                if tag == '? ':
                    tag = ' '
                    prefix = ' '
                    text = text.rstrip()
                    suffix = ''
                else:
                    prefix = '"'
                    suffix = '\\n"'
                line = tag + prefix + text + suffix
                self.dumper._write(line)
            self.dumper.level -= 1
            return
        self.dumper.visit(a)
        self.dumper._write(' -> ')
        self.dumper.visit(b)
    def isMultilineString(self, value):
        # NOTE(review): `basestring` is Python 2-only; on Python 3 this
        # method raises NameError — confirm intended interpreter.
        return isinstance(value, basestring) and '\n' in value
#
# Unfortunately JSON standard does not include comments, but this is a quite
# useful feature to have on regressions tests
#
# One alternative per token: group 1 captures a // comment; the string
# pattern (with backslash-escape handling) protects any '//' that appears
# inside a JSON string from being mistaken for a comment.
_token_res = [
    r'//[^\r\n]*', # comment
    r'"[^"\\]*(\\.[^"\\]*)*"', # string
]
_tokens_re = re.compile(r'|'.join(['(' + token_re + ')' for token_re in _token_res]), re.DOTALL)
def _strip_comment(mo):
    """re.sub callback: erase comment tokens, keep every other token.

    Group 1 of the tokenizer regex is the comment alternative; when it did
    not participate in the match, the token is a string and is preserved.
    """
    return '' if mo.group(1) else mo.group(0)
def _strip_comments(data):
    '''Strip (non-standard) JSON comments.'''
    # Delegate to the tokenizer so '//' inside JSON strings is untouched.
    return _tokens_re.sub(_strip_comment, data)
# Import-time self-check: a comment outside a string is removed, while
# comment-looking text inside a JSON string survives intact.
assert _strip_comments('''// a comment
"// a comment in a string
"''') == '''
"// a comment in a string
"'''
def load(stream, strip_images = True, strip_comments = True):
    """Parse JSON from *stream*.

    strip_images installs the object hook that drops decoded image blobs;
    strip_comments pre-filters non-standard // comments before decoding.
    """
    object_hook = strip_object_hook if strip_images else None
    if not strip_comments:
        return json.load(stream, strict=False, object_hook=object_hook)
    text = _strip_comments(stream.read())
    return json.loads(text, strict=False, object_hook=object_hook)
def main():
    """Command-line entry point: diff two JSON state dumps.

    Usage: jsondiff [options] <ref_json> <src_json>.  Prints the structural
    differences between the two files to stdout.
    """
    optparser = optparse.OptionParser(
        usage="\n\t%prog [options] <ref_json> <src_json>")
    optparser.add_option(
        '--ignore-added',
        action="store_true", dest="ignore_added", default=False,
        help="ignore added state")
    optparser.add_option(
        '--keep-images',
        action="store_false", dest="strip_images", default=True,
        help="compare images")

    (options, args) = optparser.parse_args(sys.argv[1:])
    if len(args) != 2:
        optparser.error('incorrect number of arguments')

    # Close each file promptly instead of leaking the handles.
    with open(args[0], 'rt') as f:
        a = load(f, options.strip_images)
    with open(args[1], 'rt') as f:
        b = load(f, options.strip_images)

    differ = Differ(ignore_added=options.ignore_added)
    differ.visit(a, b)


if __name__ == '__main__':
    main()
import unittest
from simpletex.base import Brace, Command, Environment
class TestBrace(unittest.TestCase):
    """Rendering behaviour of the Brace formatter."""

    def test_empty(self):
        # No arguments -> empty string.
        self.assertEqual('', Brace()())

    def test_args(self):
        # Every argument gets its own brace group, in order.
        self.assertEqual('{a}{b}{c}', Brace()('a', 'b', 'c'))
class TestCommand(unittest.TestCase):
    """Rendering of LaTeX commands with arguments and options."""

    def test_simple(self):
        self.assertEqual(r'\a', str(Command('a')))

    def test_arguments(self):
        rendered = str(Command('a', ['b', 'c']))
        self.assertEqual(r'\a{b}{c}', rendered)

    def test_options(self):
        rendered = str(Command('a', [], 'b', 'c'))
        self.assertEqual(r'\a[b, c]', rendered)

    def test_kwoptions(self):
        # Keyword-option ordering is not guaranteed; accept either form.
        rendered = str(Command('a', [], d='e', b='c'))
        self.assertIn(rendered, (r'\a[b=c, d=e]', r'\a[d=e, b=c]'))

    def test_all(self):
        rendered = str(Command('a', ['b', 'c'], 'd', 'e', f='g', h='i'))
        self.assertIn(rendered,
                      (r'\a[d, e, f=g, h=i]{b}{c}',
                       r'\a[d, e, h=i, f=g]{b}{c}'))
class TestEnvironment(unittest.TestCase):
    """Rendering of LaTeX environments (begin/end pairs with indented body)."""

    def test_no_name(self):
        # An unnamed environment cannot be rendered.
        self.assertRaises(ValueError, Environment(), '')

    def test_empty(self):
        self.assertEqual('\\begin{name}\n\n\\end{name}',
                         Environment('name')(''))

    def test_single_line(self):
        # Body lines are indented with a tab.
        self.assertEqual('\\begin{name}\n\ttext\n\\end{name}',
                         Environment('name')('text'))

    def test_multiline(self):
        self.assertEqual('\\begin{name}\n\ta\n\tb\n\\end{name}',
                         Environment('name')('a\nb'))
#!/usr/bin/python
#!encoding:utf-8
import os
import sys
import re
import commands
import glob
import fnmatch
import string
from xml.etree import ElementTree as ET
from optparse import OptionParser
def iterfindfiles(path, fnexp):
    """Yield the path of every file under *path* matching glob *fnexp*."""
    for dirpath, _subdirs, filenames in os.walk(path):
        for name in fnmatch.filter(filenames, fnexp):
            yield os.path.join(dirpath, name)
def count_upstream(string=None, str_entry=None):
    """Return 1 if '/<str_entry>/' occurs in *string*, else 0.

    Parameter names are kept for interface compatibility even though
    `string` shadows the stdlib module of the same name.
    """
    return 1 if string.find('/%s/' % str_entry) >= 0 else 0
def analy_test_file(file_path=None):
    """Count the testcases of a tests.xml file, bucketed by priority.

    Cases with status "designed" are skipped.  Each case counts once per
    subcase (its "subcase" attribute, default 1).  "auto" cases that live
    in a 'ref'/'script' set of a non-wrt suite are additionally counted as
    webdriver-automated.

    Returns a space-separated record consumed by get_case_status():
      <suite> <total> <auto> <auto_webdriver> <manual> followed by the same
      four numbers for each of P0..P3, with a trailing space.
    """
    priorities = ('P0', 'P1', 'P2', 'P3')
    # counts[priority] = [auto, auto via webdriver, manual]
    counts = dict((p, [0, 0, 0]) for p in priorities)
    suite_name = ''
    try:
        suite_name = os.path.basename(os.path.dirname(file_path))
        root = ET.parse(file_path).getroot()
        for set_node in root.findall('suite/set'):
            # A set is webdriver-automated when it is a 'ref'/'script' set
            # and the suite directory name does not contain "wrt".
            webdriver = 0
            s_type = set_node.get('type')
            if s_type == 'ref' or s_type == 'script':
                if os.path.split(file_path)[0].split('/')[-1].find("wrt") > -1:
                    webdriver = 0
                else:
                    webdriver = 1
            for tc_node in set_node.findall('testcase'):
                if tc_node.get('status') == 'designed':
                    continue
                priority = tc_node.get('priority')
                # Anything outside P0..P3 is ignored, as in the original
                # elif chain.
                if priority not in counts:
                    continue
                # int() replaces the deprecated string.atoi(); a missing or
                # empty subcase attribute defaults to 1.
                subcase = int(tc_node.get('subcase') or 1)
                bucket = counts[priority]
                if tc_node.get('execution_type') == "auto":
                    bucket[0] += subcase
                    if webdriver == 1:
                        bucket[1] += subcase
                else:
                    bucket[2] += subcase
    except Exception as e:
        # print(...) works as a statement in Python 2 and a call in Python 3.
        print("Got error when analy test files: %s" % e)
        print(file_path)
    total_auto = total_webdriver = total_manual = 0
    per_priority = []
    for p in priorities:
        auto, auto_wd, manual = counts[p]
        per_priority.extend([auto + manual, auto, auto_wd, manual])
        total_auto += auto
        total_webdriver += auto_wd
        total_manual += manual
    numbers = [total_auto + total_manual,
               total_auto, total_webdriver, total_manual] + per_priority
    return suite_name + " " + " ".join(str(n) for n in numbers) + " "
def get_upstream(file_path):
    """Scan a tests.full.xml file and report upstream-derived test entries.

    Returns a string "<count> <name1/name2/...>" where <count> is the number
    of test_script_entry nodes whose text matches a known upstream project
    and the name list contains each matched project once.
    """
    # Upstream projects we recognize in test script entry paths.
    known_upstreams = [
        "w3c",
        "csswg",
        "webkit",
        "khronos",
        "blink",
        "ecmascript_simd"]
    n_upstream = 0
    found = []
    try:
        # Kept for parity with the original flow; not used below.
        suite_name = os.path.basename(os.path.dirname(file_path))
        root = ET.parse(file_path).getroot()
        entries = root.findall(
            'suite/set/testcase/description/test_script_entry')
        for entry_node in entries:
            for name in known_upstreams:
                if count_upstream(entry_node.text, name) == 1:
                    n_upstream += 1
                    if name not in found:
                        found.append(name)
    except Exception as e:
        print(e)
    # "/"-separated list without a trailing slash (empty string when none).
    return str(n_upstream) + " " + "/".join(found)
def get_case_status(file_path):
    """Append one CSV row of case statistics for a suite to analy_result.csv.

    The row is the per-priority counts from analy_test_file() followed by
    the upstream summary from get_upstream(). Suites known to be
    irrelevant for the statistics are skipped entirely.
    """
    # These suites are deliberately excluded from the statistics.
    if "tct-widget02-w3c-tests" in file_path:
        return
    if "tct-testconfig" in file_path:
        return
    if "xwalk-system-tests" in file_path:
        return
    try:
        case_message = analy_test_file(file_path)
        upstream = get_upstream(file_path)
        content = case_message + upstream
        # 'with' guarantees the handle is closed even if a write raises
        # (the original leaked the handle on a write error).
        with open("analy_result.csv", 'a') as fp:
            fp.write(content)
            fp.write("\n")
    except Exception as e:
        print("Got error when get case status: %s" % e)
def init_result_file():
    """Create a fresh analy_result.csv in the working directory.

    Removes any previous result file, then writes the CSV header line.
    Errors are reported but never raised to the caller.
    """
    # Backslash continuations keep this a single header line in the file.
    title = "Suite_name,Total,Total_auto,Total_auto_webdriver,Total_manual,P0,P0_auto,P0_auto_webdriver,P0_manual,P1,P1_auto,\
P1_auto_webdriver,P1_manual,P2,P2_auto,P2_auto_webdriver,P2_manual,P3,P3_auto,P3_auto_webdriver,P3_manual,\
Integrated_Upstream_TCs,Upstream_Resource"
    try:
        file_path = os.getcwd() + "/analy_result.csv"
        if os.path.exists(file_path):
            os.remove(file_path)
        # 'with' closes the handle even if a write fails
        # (the original leaked the handle on a write error).
        with open("analy_result.csv", 'a') as fp:
            fp.write(title)
            fp.write("\n")
    except Exception as e:
        print("Got error when init analy file : %s" % e)
def main():
    """Command-line entry point.

    -r <dir>  : recurse over <dir> collecting every tests.full.xml
    -f <file> : analyze a single tests.full.xml file
    With no arguments, option help is shown instead.
    """
    try:
        usage = "./stats.py -f ../../webapi/tct-2dtransforms-css3-tests/tests.full.xml"
        parser = OptionParser(usage=usage)
        parser.add_option(
            "-r",
            dest="suitesdir",
            help="specify the path of folder which tests.full.xml located in.")
        parser.add_option(
            "-f",
            dest="xmlfile",
            help="specify the path of tests.full.xml file")
        init_result_file()
        # No arguments at all: fall back to the help screen.
        if len(sys.argv) == 1:
            sys.argv.append("-h")
        options, args = parser.parse_args()
        if options.suitesdir:
            for filename in iterfindfiles(
                    "%s" % options.suitesdir, "tests.full.xml"):
                get_case_status(filename)
        if options.xmlfile:
            get_case_status(options.xmlfile)
    except Exception as e:
        print("Got error: %s, exit" % e)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""

import TestGyp

# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.

# Make is excluded per the TODOs above.
test = TestGyp.TestGyp(formats=['!make'])

test.run_gyp('prog1.gyp', '--depth=..', chdir='src')

test.relocate('src', 'relocate/src')

test.subdir('relocate/builddir')

# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)

# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')

# Expected stdout of each built program; each prints its own source name
# and the name of the helper it links against.
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""

expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""

expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""

expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""

expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""

def run_builddir(prog, expect):
  # Runs a program from the shared build directory and checks its output.
  dir = 'relocate/builddir/Default/'
  test.run(program=test.workpath(dir + prog), stdout=expect)

run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test() | unknown | codeparrot/codeparrot-clean | ||
/* Copyright (c) 2014, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef MUTEX_LOCK_INCLUDED
#define MUTEX_LOCK_INCLUDED
/**
@file include/mutex_lock.h
*/
#include <mysql/psi/mysql_mutex.h>
#include <utility>
/**
A simple wrapper around a mutex:
Grabs the mutex in the CTOR, releases it in the DTOR.
The mutex may be NULL, in which case this is a no-op.
Templated to allow unit testing with mocked mutex. Not copyable since
ownership of a mutex cannot be shared, but movable so that ownership can be
transferred to a different Mutex_lock.
*/
template <class MUTEX>
class Generic_mutex_lock {
 public:
  Generic_mutex_lock() noexcept = default;

  /// Acquires @p mutex (if non-null), recording the caller's source
  /// location for instrumentation of the lock/unlock calls.
  Generic_mutex_lock(MUTEX *mutex, const char *src_file, int src_line) noexcept
      : m_mutex(mutex), m_src_file(src_file), m_src_line(src_line) {
    if (m_mutex) {
      mysql_mutex_lock_with_src(m_mutex, m_src_file, m_src_line);
    }
  }

  /// Releases the mutex (if any), reporting the location where it was taken.
  ~Generic_mutex_lock() noexcept {
    if (m_mutex) {
      mysql_mutex_unlock_with_src(m_mutex, m_src_file, m_src_line);
    }
  }

  Generic_mutex_lock(const Generic_mutex_lock &) = delete;

  /// Move: take over src's lock and source location; src becomes empty.
  Generic_mutex_lock(Generic_mutex_lock &&src) noexcept
      : m_mutex{src.m_mutex},
        m_src_file{src.m_src_file},
        m_src_line{src.m_src_line} {
    src.m_mutex = nullptr;
    src.m_src_file = nullptr;
    src.m_src_line = 0;
  }

  Generic_mutex_lock &operator=(const Generic_mutex_lock &) = delete;

  /// Move-and-swap: tmp takes over src's lock, then exchanges state with
  /// *this, so tmp's destructor releases the lock previously held by *this.
  Generic_mutex_lock &operator=(Generic_mutex_lock &&src) noexcept {
    Generic_mutex_lock tmp{std::move(src)};
    std::swap(m_mutex, tmp.m_mutex);
    // Swap (not copy) the source location as well, so the unlock performed
    // by tmp's destructor is attributed to where the old lock was taken
    // rather than to src's location.
    std::swap(m_src_file, tmp.m_src_file);
    std::swap(m_src_line, tmp.m_src_line);
    return *this;
  }

 private:
  MUTEX *m_mutex = nullptr;
  const char *m_src_file = nullptr;
  int m_src_line = 0;
};
/// Instantiation for the server's instrumented mutex type.
using Mutex_lock = Generic_mutex_lock<mysql_mutex_t>;

/// Declares a scoped lock named NAME on mutex X, tagged with the
/// caller's file and line for instrumentation.
#define MUTEX_LOCK(NAME, X) const Mutex_lock NAME(X, __FILE__, __LINE__)
#endif // MUTEX_LOCK_INCLUDED | c | github | https://github.com/mysql/mysql-server | include/mutex_lock.h |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import entropy_impl as entropy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Short aliases for the contrib modules used throughout these tests.
distributions = distributions_lib
layers = layers_lib
entropy = entropy_lib
class NormalNoEntropy(distributions.Normal):  # pylint: disable=no-init
  """Normal distribution whose `.entropy` method is unavailable.

  Used to exercise the sample-entropy fallback path in entropy_shannon.
  """

  def entropy(self):
    # Raise, don't return, the exception: the original returned the
    # NotImplementedError instance, which never triggers callers'
    # `except NotImplementedError` fallback handling.
    raise NotImplementedError('Entropy removed by gremlins')
def get_train_op(scalar_loss, optimizer='SGD', learning_rate=1.0, decay=0.0):
  """Build a train op minimizing `scalar_loss` with a polynomially decayed rate.

  Args:
    scalar_loss: Scalar loss tensor to minimize.
    optimizer: Optimizer name understood by layers.optimize_loss.
    learning_rate: Initial learning rate.
    decay: Exponent of the (1 + step)^(-decay) decay schedule.

  Returns:
    The training op produced by layers.optimize_loss.
  """
  global_step = variables.Variable(0)

  def decay_fn(rate, t):
    # rate * (1 + t)^(-decay); decay == 0 keeps the rate constant.
    return rate * (1 + math_ops.to_float(t))**(-decay)

  return layers.optimize_loss(
      scalar_loss,
      global_step,
      learning_rate,
      optimizer,
      learning_rate_decay_fn=decay_fn)
def _assert_monotonic_decreasing(array, atol=1e-5):
  """Assert the flattened values never increase by more than `atol`."""
  # Negating reduces this to the increasing check.
  negated = -np.asarray(array)
  _assert_monotonic_increasing(negated, atol=atol)
def _assert_monotonic_increasing(array, atol=1e-5):
array = np.asarray(array)
diff = np.diff(array.ravel())
np.testing.assert_array_less(-1 * atol, diff)
class ElboRatioTest(test.TestCase):
  """Show sampling converges to true KL values."""

  def setUp(self):
    # Fixed seed so the sampled distribution parameters are reproducible.
    self._rng = np.random.RandomState(0)

  def test_convergence_to_kl_using_sample_form_on_3dim_normal(self):
    """Sampled ELBO ratio converges to the analytic KL (pure sample form)."""
    # Test that the sample mean KL is the same as analytic when we use samples
    # to estimate every part of the KL divergence ratio.
    vector_shape = (2, 3)
    n_samples = 5000

    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))
      p = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=p.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.sample,
          seed=42)
      actual_kl = distributions.kl_divergence(q, p)

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.05)

  def test_convergence_to_kl_using_analytic_entropy_form_on_3dim_normal(self):
    """Sampled ELBO ratio converges to the analytic KL (analytic entropy)."""
    # Test that the sample mean KL is the same as analytic when we use an
    # analytic entropy combined with sampled cross-entropy.
    n_samples = 5000
    vector_shape = (2, 3)

    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))
      p = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=p.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.analytic_entropy,
          seed=42)
      actual_kl = distributions.kl_divergence(q, p)

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.1)

  def test_sample_kl_zero_when_p_and_q_are_the_same_distribution(self):
    """KL(q || q) estimated by sampling is exactly zero."""
    n_samples = 50
    vector_shape = (2, 3)

    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=q.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.sample,
          seed=42)

      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(np.zeros(2), sample_kl.eval())
class EntropyShannonTest(test.TestCase):
  """Checks entropy_shannon's analytic, sampled, and fallback code paths."""

  def test_normal_entropy_default_form_uses_exact_entropy(self):
    """Default form matches the distribution's closed-form entropy."""
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(dist, n=11)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())

  def test_normal_entropy_analytic_form_uses_exact_entropy(self):
    """Analytic form matches the distribution's closed-form entropy."""
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, form=entropy.ELBOForms.analytic_entropy)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())

  def test_normal_entropy_sample_form_gets_approximate_answer(self):
    """Sample form is close to, but not exactly, the analytic entropy."""
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())

  def test_default_entropy_falls_back_on_sample_if_analytic_not_available(self):
    """Default form uses sampling when the distribution lacks .entropy."""
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      # NormalNoEntropy is like a Normal, but does not have .entropy method, so
      # we are forced to fall back on sample entropy.
      dist_no_entropy = NormalNoEntropy(loc=1.11, scale=2.22)
      dist_yes_entropy = distributions.Normal(loc=1.11, scale=2.22)

      mc_entropy = entropy.entropy_shannon(
          dist_no_entropy, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist_yes_entropy.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
class RenyiRatioTest(test.TestCase):
  """Show renyi_ratio is minimized when the distributions match."""

  def setUp(self):
    # Fixed seed so the sampled distribution parameters are reproducible.
    self._rng = np.random.RandomState(0)

  def test_fitting_two_dimensional_normal_n_equals_1000(self):
    """Maximizing the negative Renyi divergence recovers the target normal."""
    # Minimizing Renyi divergence should allow us to make one normal match
    # another one exactly.
    n = 1000
    mu_true = np.array([1.0, -1.0], dtype=np.float64)
    chol_true = np.array([[2.0, 0.0], [0.5, 1.0]], dtype=np.float64)
    with self.test_session() as sess:
      target = distributions.MultivariateNormalTriL(mu_true, chol_true)

      # Set up q distribution by defining mean/covariance as Variables
      mu = variables.Variable(
          np.zeros(mu_true.shape), dtype=mu_true.dtype, name='mu')
      mat = variables.Variable(
          np.zeros(chol_true.shape), dtype=chol_true.dtype, name='mat')
      # softplus keeps the learned Cholesky diagonal positive.
      chol = distributions.matrix_diag_transform(mat, transform=nn_ops.softplus)
      q = distributions.MultivariateNormalTriL(mu, chol)
      for alpha in [0.25, 0.75]:

        negative_renyi_divergence = entropy.renyi_ratio(
            log_p=target.log_prob, q=q, n=n, alpha=alpha, seed=0)
        train_op = get_train_op(
            math_ops.reduce_mean(-negative_renyi_divergence),
            optimizer='SGD',
            learning_rate=0.5,
            decay=0.1)

        variables.global_variables_initializer().run()
        renyis = []
        for step in range(1000):
          sess.run(train_op)
          if step in [1, 5, 100]:
            renyis.append(negative_renyi_divergence.eval())

        # This optimization should maximize the renyi divergence.
        _assert_monotonic_increasing(renyis, atol=0)

        # Relative tolerance (rtol) chosen 2 times as large as minimum needed
        # to pass.
        self.assertAllClose(target.loc.eval(), q.loc.eval(), rtol=0.06)
        self.assertAllClose(target.scale.to_dense().eval(),
                            q.scale.to_dense().eval(),
                            rtol=0.1)

  def test_divergence_between_identical_distributions_is_zero(self):
    """Renyi divergence of a distribution against itself is zero."""
    n = 1000
    vector_shape = (2, 3)
    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))
      for alpha in [0.25, 0.75]:

        negative_renyi_divergence = entropy.renyi_ratio(
            log_p=q.log_prob, q=q, n=n, alpha=alpha, seed=0)

        self.assertEqual((2,), negative_renyi_divergence.get_shape())
        self.assertAllClose(np.zeros(2), negative_renyi_divergence.eval())
class RenyiAlphaTest(test.TestCase):
  """Checks the renyi_alpha decay schedule and its input validation."""

  def test_with_three_alphas(self):
    """Alpha decays monotonically from alpha_max to alpha_min."""
    with self.test_session():
      for dtype in (dtypes.float32, dtypes.float64):
        alpha_min = constant_op.constant(0.0, dtype=dtype)
        alpha_max = 0.5
        decay_time = 3

        alpha_0 = entropy.renyi_alpha(
            0, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_1 = entropy.renyi_alpha(
            1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_2 = entropy.renyi_alpha(
            2, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_3 = entropy.renyi_alpha(
            3, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)

        # Alpha should start at alpha_max.
        self.assertAllClose(alpha_max, alpha_0.eval(), atol=1e-5)
        # Alpha should finish at alpha_min.
        self.assertAllClose(alpha_min.eval(), alpha_3.eval(), atol=1e-5)
        # In between, alpha should be monotonically decreasing.
        _assert_monotonic_decreasing(
            [alpha_0.eval(), alpha_1.eval(), alpha_2.eval(), alpha_3.eval()])

  def test_non_scalar_input_raises(self):
    """Each argument must be a scalar; a list triggers a ValueError."""
    with self.test_session():
      # Good values here
      step = 0
      alpha_min = 0.0
      alpha_max = 0.5
      decay_time = 3

      # Use one bad value inside each check.
      # The "bad" value is always the non-scalar one.
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            [step], decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, [decay_time], alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, decay_time, alpha_min=[alpha_min], alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, decay_time, alpha_min=alpha_min, alpha_max=[alpha_max]).eval()

  def test_input_with_wrong_sign_raises(self):
    """decay_time must be positive and step non-negative."""
    with self.test_session():
      # Good values here
      step = 0
      alpha_min = 0.0
      alpha_max = 0.5
      decay_time = 3

      # Use one bad value inside each check.
      # The "bad" value is the one with the wrong sign.
      with self.assertRaisesOpError('decay_time must be positive'):
        entropy.renyi_alpha(
            step, 0.0, alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesOpError('step must be non-negative'):
        entropy.renyi_alpha(
            -1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
if __name__ == '__main__':
test.main() | unknown | codeparrot/codeparrot-clean | ||
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: CURLINFO_NUM_CONNECTS
Section: 3
Source: libcurl
See-also:
- curl_easy_getinfo (3)
- curl_easy_setopt (3)
Protocol:
- All
Added-in: 7.12.3
---
# NAME
CURLINFO_NUM_CONNECTS - number of created connections
# SYNOPSIS
~~~c
#include <curl/curl.h>
CURLcode curl_easy_getinfo(CURL *handle, CURLINFO_NUM_CONNECTS, long *nump);
~~~
# DESCRIPTION
Pass a pointer to a long to receive how many new connections libcurl had to
create to achieve the previous transfer (only successful connects are
counted). Combined with CURLINFO_REDIRECT_COUNT(3), this lets you determine
how many times libcurl successfully reused an existing connection. See the
connection options of curl_easy_setopt(3) for how libcurl tries to make
persistent connections to save time.
# %PROTOCOLS%
# EXAMPLE
~~~c
int main(void)
{
CURL *curl = curl_easy_init();
if(curl) {
CURLcode result;
curl_easy_setopt(curl, CURLOPT_URL, "https://example.com");
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
result = curl_easy_perform(curl);
if(result == CURLE_OK) {
long connects;
result = curl_easy_getinfo(curl, CURLINFO_NUM_CONNECTS, &connects);
if(result == CURLE_OK)
printf("It needed %ld connects\n", connects);
}
curl_easy_cleanup(curl);
}
}
~~~
# %AVAILABILITY%
# RETURN VALUE
curl_easy_getinfo(3) returns a CURLcode indicating success or error.
CURLE_OK (0) means everything was OK, non-zero means an error occurred, see
libcurl-errors(3). | unknown | github | https://github.com/curl/curl | docs/libcurl/opts/CURLINFO_NUM_CONNECTS.md |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.task;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
/**
 * Callback interface that can be used to customize a {@link ThreadPoolTaskScheduler}.
 * <p>
 * As a {@link FunctionalInterface functional interface}, implementations may be
 * supplied as a lambda.
 *
 * @author Stephane Nicoll
 * @since 3.2.0
 */
@FunctionalInterface
public interface ThreadPoolTaskSchedulerCustomizer {

	/**
	 * Callback to customize a {@link ThreadPoolTaskScheduler} instance.
	 * @param taskScheduler the task scheduler to customize
	 */
	void customize(ThreadPoolTaskScheduler taskScheduler);

}
# frozen_string_literal: true
module ActiveRecord
module ConnectionAdapters
class PoolConfig # :nodoc:
include MonitorMixin
attr_reader :db_config, :role, :shard, :connection_descriptor
attr_writer :schema_reflection, :server_version
def schema_reflection
@schema_reflection ||= SchemaReflection.new(db_config.lazy_schema_cache_path)
end
INSTANCES = ObjectSpace::WeakMap.new
private_constant :INSTANCES
class << self
def discard_pools!
INSTANCES.each_key(&:discard_pool!)
end
def disconnect_all!
INSTANCES.each_key { |c| c.disconnect!(automatic_reconnect: true) }
end
end
def initialize(connection_class, db_config, role, shard)
super()
@server_version = nil
self.connection_descriptor = connection_class
@db_config = db_config
@role = role
@shard = shard
@pool = nil
INSTANCES[self] = self
end
def server_version(connection)
@server_version || synchronize { @server_version ||= connection.get_database_version }
end
def connection_descriptor=(connection_descriptor)
case connection_descriptor
when ConnectionHandler::ConnectionDescriptor
@connection_descriptor = connection_descriptor
else
@connection_descriptor = ConnectionHandler::ConnectionDescriptor.new(connection_descriptor.name, connection_descriptor.primary_class?)
end
end
def disconnect!(automatic_reconnect: false)
return unless @pool
synchronize do
return unless @pool
@pool.automatic_reconnect = automatic_reconnect
@pool.disconnect!
end
nil
end
def pool
@pool || synchronize { @pool ||= ConnectionAdapters::ConnectionPool.new(self) }
end
def discard_pool!
return unless @pool
synchronize do
return unless @pool
@pool.discard!
@pool = nil
end
end
end
end
end
ActiveSupport::ForkTracker.after_fork { ActiveRecord::ConnectionAdapters::PoolConfig.discard_pools! } | ruby | github | https://github.com/rails/rails | activerecord/lib/active_record/connection_adapters/pool_config.rb |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def _AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding):
  """Adapter giving AvgPoolGrad the same signature as MaxPoolGrad.

  Average pooling gradients do not need the forward outputs, so that
  argument is discarded before delegating.
  """
  del outputs  # Unused by average-pooling gradients.
  input_shape = inputs.get_shape().as_list()
  return gen_nn_ops._avg_pool3d_grad(
      input_shape,
      output_gradients,
      ksize=ksize,
      strides=strides,
      padding=padding)
class Pooling3DTest(XLATestCase):
  def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
                    expected):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool3d or
        nn_ops.avg_pool3d.
      input_sizes: Input tensor dimensions.
      window: Tuple of kernel dims: planes, rows, cols.
      strides: Tuple of strides for dims: planes, rows, cols.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = np.arange(1.0, total_size + 1, dtype=np.float32)
    x = x.reshape(input_sizes)
    with self.test_session() as sess, self.test_scope():
      inputs = array_ops.placeholder(dtypes.float32)
      # Batch and channel dims get kernel/stride 1 (no pooling over them).
      t = pool_func(
          inputs,
          ksize=[1] + window + [1],
          strides=[1] + strides + [1],
          padding=padding)
      vals = sess.run(t, {inputs: x})
    # Verifies values.
    actual = vals.flatten()
    self.assertAllClose(expected, actual)
  def testAvgPool3dValidPadding(self):
    """2x2x2 average pooling with stride 2, VALID padding, 3x3x3 input."""
    expected_output = [20.5, 21.5, 22.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 3, 3, 3, 3],
        window=[2, 2, 2],
        strides=[2, 2, 2],
        padding="VALID",
        expected=expected_output)
  def testAvgPool3dSamePadding(self):
    """2x2x2 average pooling with stride 2, SAME padding."""
    expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 2, 2, 4, 3],
        window=[2, 2, 2],
        strides=[2, 2, 2],
        padding="SAME",
        expected=expected_output)
  def testAvgPool3dSamePaddingDifferentStrides(self):
    """Average pooling with per-dimension window/stride, SAME padding."""
    expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 5, 8, 1, 1],
        window=[1, 2, 3],
        strides=[2, 3, 1],
        padding="SAME",
        expected=expected_output)
  def testMaxPool3dValidPadding(self):
    """2x2x2 max pooling with stride 2, VALID padding, 3x3x3 input."""
    expected_output = [40.0, 41.0, 42.0]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 3, 3],
        window=[2, 2, 2],
        strides=[2, 2, 2],
        padding="VALID",
        expected=expected_output)
  def testMaxPool3dSamePadding(self):
    """2x2x2 max pooling with stride 2, SAME padding."""
    expected_output = [31., 32., 33., 34., 35., 36.]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 2, 2, 3, 3],
        window=[2, 2, 2],
        strides=[2, 2, 2],
        padding="SAME",
        expected=expected_output)
  def testMaxPool3dSamePaddingDifferentStrides(self):
    """Max pooling with per-dimension window/stride; then a larger input."""
    expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 5, 8, 1, 1],
        window=[1, 2, 3],
        strides=[2, 3, 1],
        padding="SAME",
        expected=expected_output)

    # Test pooling on a larger input, with different stride and kernel
    # size for the 'z' dimension.

    # Simulate max pooling in numpy to get the expected output.
    input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
    # Pad one row/column so SAME padding's edge behavior can be emulated
    # by strided slicing below.
    input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
                        mode="constant")
    expected_output = input_data[:, 1::2, 1::2, :]
    expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
    expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
    expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 5, 27, 27, 64],
        window=[1, 2, 2],
        strides=[1, 2, 2],
        padding="SAME",
        expected=expected_output.flatten())
  def testKernelSmallerThanStride(self):
    """Both pooling ops when the kernel is smaller than the stride."""
    # 1x1x1 kernel with stride 2 just subsamples the input.
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 3, 1],
        window=[1, 1, 1],
        strides=[2, 2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9, 19, 21, 25, 27])

    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 7, 7, 7, 1],
        window=[2, 2, 2],
        strides=[3, 3, 3],
        padding="VALID",
        expected=[58, 61, 79, 82, 205, 208, 226, 229])

    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 3, 3, 3, 1],
        window=[1, 1, 1],
        strides=[2, 2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9, 19, 21, 25, 27])

    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 7, 7, 7, 1],
        window=[2, 2, 2],
        strides=[3, 3, 3],
        padding="VALID",
        expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
  def _VerifyGradient(self, pool_func, pool_grad_func, input_sizes, ksize,
                      strides, padding):
    """Verifies the output values of the pooling gradient function.

    Computes the pooling gradient on the CPU as reference and compares it
    with the XLA result for the same inputs.

    Args:
      pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function for pool_func
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
    """
    # Batch and channel dims get kernel/stride 1.
    ksize = [1] + ksize + [1]
    strides = [1] + strides + [1]
    total_size = np.prod(input_sizes)
    x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
    with self.test_session() as sess:
      # Use the forward pool function to compute some corresponding outputs
      # (needed for the CPU device, and we need the shape in both cases).
      with ops.device("CPU"):
        inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        outputs = pool_func(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding)

      output_vals = np.array(sess.run(outputs, {inputs: x}))
      # Upstream gradients are incrementing values, one per output element.
      output_gradient_vals = np.arange(
          1, output_vals.size + 1, dtype=np.float32)
      output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)

      # Use the Tensorflow CPU pooling gradient to compute the expected input
      # gradients.
      with ops.device("CPU"):
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_vals.shape)
        expected_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding)
        expected_input_gradient_vals = sess.run(
            expected_input_gradients,
            {inputs: x,
             output_gradients: output_gradient_vals})

      # Run the gradient op on the XLA device
      # NOTE(review): 'inputs' (created under the CPU device above) is reused
      # here; the forward outputs are fed via a fresh placeholder instead of
      # recomputed — appears intentional since only the feeds matter.
      with self.test_scope():
        outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
        actual_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding)
      actual = sess.run(actual_input_gradients, {
          inputs: x,
          outputs: output_vals,
          output_gradients: output_gradient_vals
      })

    # Compare the Tensorflow and XLA results.
    self.assertAllClose(
        expected_input_gradient_vals.flatten(),
        actual.flatten(),
        rtol=1e-5,
        atol=1e-6)
    self.assertShapeEqual(actual, inputs)
  def testMaxPoolGradValidPadding1_1_3d(self):
    """MaxPool3D gradient, VALID padding, 1x1x1 kernel, stride 1."""
    self._VerifyGradient(
        nn_ops.max_pool3d,
        gen_nn_ops._max_pool3d_grad,
        input_sizes=[1, 3, 3, 3, 1],
        ksize=[1, 1, 1],
        strides=[1, 1, 1],
        padding="VALID")
  def testMaxPoolGradValidPadding2_1_6_3d(self):
    """MaxPool3D gradient, VALID padding, 2x2x2 kernel, stride 1."""
    self._VerifyGradient(
        nn_ops.max_pool3d,
        gen_nn_ops._max_pool3d_grad,
        input_sizes=[2, 3, 3, 6, 3],
        ksize=[2, 2, 2],
        strides=[1, 1, 1],
        padding="VALID")
  def testMaxPoolGradValidPadding2_1_7_3d(self):
    """MaxPool3D gradient, VALID padding, non-cubic spatial dims."""
    self._VerifyGradient(
        nn_ops.max_pool3d,
        gen_nn_ops._max_pool3d_grad,
        input_sizes=[2, 3, 5, 7, 3],
        ksize=[2, 2, 2],
        strides=[1, 1, 1],
        padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops._max_pool3d_grad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID")
def testMaxPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops._max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME")
def testMaxPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops._max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME")
def testMaxPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops._max_pool3d_grad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME")
def testMaxPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops._max_pool3d_grad,
input_sizes=[1, 3, 3, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 2, 4, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 2, 2, 2, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 3, 6, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME")
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main() | unknown | codeparrot/codeparrot-clean | |
// RUN: %check_clang_tidy %s cppcoreguidelines-pro-type-cstyle-cast %t
// A C-style cast between unrelated pointer types (reinterpret_cast semantics)
// must be diagnosed.
void reinterpretcast() {
  int i = 0;
  void *j;
  j = (int*)j;
  // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: do not use C-style cast to convert between unrelated types [cppcoreguidelines-pro-type-cstyle-cast]
}
// Removing constness via a C-style cast is diagnosed; adding constness is not.
void constcast() {
  int* i;
  const int* j;
  i = (int*)j;
  // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: do not use C-style cast to cast away constness
  j = (const int*)i; // OK, const added
  (void)j; // OK, not a const_cast
}
// When one C-style cast both drops const and converts between unrelated
// types, the unrelated-types diagnostic takes precedence.
void const_and_reinterpret() {
  int* i;
  const void* j;
  i = (int*)j;
  // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: do not use C-style cast to convert between unrelated types
}
// Class hierarchies exercised by the downcast tests below: plain single and
// multiple inheritance, plus polymorphic variants (virtual destructor) for
// which the check can additionally suggest a dynamic_cast fix-it.
class Base {
};
class Derived : public Base {
};
class Base2 {
};
class MultiDerived : public Base, public Base2 {
};
class PolymorphicBase {
public:
  virtual ~PolymorphicBase();
};
class PolymorphicDerived : public PolymorphicBase {
};
class PolymorphicMultiDerived : public Base, public PolymorphicBase {
};
// Pointer downcasts in non-polymorphic hierarchies warn (no fix-it possible);
// upcasts to an unambiguous public base are allowed.
void pointers() {
  auto P0 = (Derived*)new Base();
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: do not use C-style cast to downcast from a base to a derived class
  const Base* B0;
  auto PC0 = (const Derived*)(B0);
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class
  auto P1 = (Base*)new Derived(); // OK, upcast to a public base
  auto P2 = (Base*)new MultiDerived(); // OK, upcast to a public base
  auto P3 = (Base2*)new MultiDerived(); // OK, upcast to a public base
}
// For polymorphic hierarchies the diagnostic also offers a dynamic_cast
// replacement, verified by the CHECK-FIXES lines.
void pointers_polymorphic() {
  auto PP0 = (PolymorphicDerived*)new PolymorphicBase();
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto PP0 = dynamic_cast<PolymorphicDerived*>(new PolymorphicBase());
  const PolymorphicBase* B0;
  auto PPC0 = (const PolymorphicDerived*)B0;
  // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto PPC0 = dynamic_cast<const PolymorphicDerived*>(B0);
  auto B1 = (PolymorphicBase*)new PolymorphicDerived(); // OK, upcast to a public base
  auto B2 = (PolymorphicBase*)new PolymorphicMultiDerived(); // OK, upcast to a public base
  auto B3 = (Base*)new PolymorphicMultiDerived(); // OK, upcast to a public base
}
// An array decays to a base pointer; casting that to a derived pointer is
// still a diagnosed downcast.
void arrays() {
  Base ArrayOfBase[10];
  auto A0 = (Derived*)ArrayOfBase;
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: do not use C-style cast to downcast from a base to a derived class
}
// Same as arrays(), but the polymorphic base gets a dynamic_cast fix-it.
void arrays_polymorphic() {
  PolymorphicBase ArrayOfPolymorphicBase[10];
  auto AP0 = (PolymorphicDerived*)ArrayOfPolymorphicBase;
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto AP0 = dynamic_cast<PolymorphicDerived*>(ArrayOfPolymorphicBase);
}
// Reference downcasts behave like pointer downcasts; no fix-it is offered for
// the non-polymorphic hierarchy.
void references() {
  Base B0;
  auto R0 = (Derived&)B0;
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: do not use C-style cast to downcast from a base to a derived class
  Base& RefToBase = B0;
  auto R1 = (Derived&)RefToBase;
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: do not use C-style cast to downcast from a base to a derived class
  const Base& ConstRefToBase = B0;
  auto RC1 = (const Derived&)ConstRefToBase;
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class
  Derived RD1;
  auto R2 = (Base&)RD1; // OK, upcast to a public base
}
// Polymorphic reference downcasts get dynamic_cast fix-its, including the
// const-qualified and parenthesized operand forms.
void references_polymorphic() {
  PolymorphicBase B0;
  auto RP0 = (PolymorphicDerived&)B0;
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto RP0 = dynamic_cast<PolymorphicDerived&>(B0);
  PolymorphicBase& RefToPolymorphicBase = B0;
  auto RP1 = (PolymorphicDerived&)RefToPolymorphicBase;
  // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto RP1 = dynamic_cast<PolymorphicDerived&>(RefToPolymorphicBase);
  const PolymorphicBase& ConstRefToPolymorphicBase = B0;
  auto RPC2 = (const PolymorphicDerived&)(ConstRefToPolymorphicBase);
  // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: do not use C-style cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES: auto RPC2 = dynamic_cast<const PolymorphicDerived&>(ConstRefToPolymorphicBase);
  PolymorphicDerived d1;
  auto RP2 = (PolymorphicBase&)d1; // OK, upcast to a public base
}
// Template whose cast direction depends on the instantiation arguments.
template<class B, class D>
void templ() {
  auto B0 = (B*)new D();
}
// Instantiates templ() as a downcast; currently not diagnosed (known gap).
void templ_bad_call() {
  templ<Derived, Base>(); //FIXME: this should trigger a warning
}
// Instantiates templ() as a legal upcast; must stay diagnostic-free.
void templ_good_call() {
  templ<Base, Derived>(); // OK, upcast to a public base
} | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-cstyle-cast.cpp
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.symbolDeclarationOverridesProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.symbolDeclarationOverridesProvider.AbstractIsSubclassOfTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleIsSubclassOfTestGenerated extends AbstractIsSubclassOfTest {
  // NOTE: generated code (see header) — regenerate via GenerateAnalysisApiTestsKt
  // instead of editing by hand.
  @NotNull
  @Override
  public AnalysisApiTestConfigurator getConfigurator() {
    return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
      new AnalysisApiTestConfiguratorFactoryData(
        FrontendKind.Fir,
        TestModuleKind.Source,
        AnalysisSessionMode.Normal,
        AnalysisApiMode.Ide
      )
    );
  }

  // Guards against test-data files that exist on disk but have no generated method.
  @Test
  public void testAllFilesPresentInIsSubclassOf() {
    KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf"), Pattern.compile("^(.+)\\.kt$"), null, true);
  }

  @Test
  @TestMetadata("childClass.kt")
  public void testChildClass() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/childClass.kt");
  }

  @Test
  @TestMetadata("grandParentClass.kt")
  public void testGrandParentClass() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/grandParentClass.kt");
  }

  @Test
  @TestMetadata("localClasses.kt")
  public void testLocalClasses() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/localClasses.kt");
  }

  @Test
  @TestMetadata("parentClass.kt")
  public void testParentClass() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/parentClass.kt");
  }

  @Test
  @TestMetadata("sameClass.kt")
  public void testSameClass() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/sameClass.kt");
  }

  @Test
  @TestMetadata("unrelatedClass.kt")
  public void testUnrelatedClass() {
    runTest("analysis/analysis-api/testData/components/symbolDeclarationOverridesProvider/isSubclassOf/unrelatedClass.kt");
  }
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/components/symbolDeclarationOverridesProvider/FirIdeNormalAnalysisSourceModuleIsSubclassOfTestGenerated.java
# (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
    '''
    Converts a given value to a percentage if specified as "x%",
    otherwise converts the given value to an integer.
    '''
    is_percentage = isinstance(value, string_types) and value.endswith('%')
    if not is_percentage:
        return int(value)
    percentage = int(value.replace("%", ""))
    # A computed share of zero falls back to the configured minimum.
    return int((percentage / 100.0) * num_items) or min_value
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_9_2_4(HarnessCase):
    """Thread Harness automation for certification test case 9.2.4."""
    # The device under test runs in the Commissioner role.
    role = HarnessCase.ROLE_COMMISSIONER
    case = '9 2 4'
    # Number of reference (golden) devices the topology requires.
    golden_devices_required = 1
    def on_dialog(self, dialog, title):
        """No harness dialog needs special handling for this case."""
        pass
# Allow running this case directly as a unittest module.
if __name__ == '__main__':
    unittest.main() | unknown | codeparrot/codeparrot-clean | |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.math;
import static com.google.common.base.Preconditions.checkState;
import static java.lang.Double.NaN;
import static java.lang.Double.isFinite;
import static java.lang.Double.isNaN;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.primitives.Doubles;
/**
* A mutable object which accumulates paired double values (e.g. points on a plane) and tracks some
* basic statistics over all the values added so far. This class is not thread safe.
*
* @author Pete Gillin
* @since 20.0
*/
@J2ktIncompatible
@GwtIncompatible
public final class PairedStatsAccumulator {
  /** Creates a new accumulator. */
  public PairedStatsAccumulator() {}
  // These fields must satisfy the requirements of PairedStats' constructor as well as those of the
  // stat methods of this class.
  private final StatsAccumulator xStats = new StatsAccumulator();
  private final StatsAccumulator yStats = new StatsAccumulator();
  // Running C_n from the recurrence documented in add(); becomes (and stays) NaN
  // once any non-finite pair has been added.
  private double sumOfProductsOfDeltas = 0.0;
  /** Adds the given pair of values to the dataset. */
  public void add(double x, double y) {
    // We extend the recursive expression for the one-variable case at Art of Computer Programming
    // vol. 2, Knuth, 4.2.2, (16) to the two-variable case. We have two value series x_i and y_i.
    // We define the arithmetic means X_n = 1/n \sum_{i=1}^n x_i, and Y_n = 1/n \sum_{i=1}^n y_i.
    // We also define the sum of the products of the differences from the means
    // C_n = \sum_{i=1}^n x_i y_i - n X_n Y_n
    // for all n >= 1. Then for all n > 1:
    // C_{n-1} = \sum_{i=1}^{n-1} x_i y_i - (n-1) X_{n-1} Y_{n-1}
    // C_n - C_{n-1} = x_n y_n - n X_n Y_n + (n-1) X_{n-1} Y_{n-1}
    // = x_n y_n - X_n [ y_n + (n-1) Y_{n-1} ] + [ n X_n - x_n ] Y_{n-1}
    // = x_n y_n - X_n y_n - x_n Y_{n-1} + X_n Y_{n-1}
    // = (x_n - X_n) (y_n - Y_{n-1})
    xStats.add(x);
    // At this point xStats.mean() is X_n while yStats.mean() is still Y_{n-1},
    // exactly the quantities the recurrence above requires; y is added last.
    if (isFinite(x) && isFinite(y)) {
      if (xStats.count() > 1) {
        sumOfProductsOfDeltas += (x - xStats.mean()) * (y - yStats.mean());
      }
    } else {
      sumOfProductsOfDeltas = NaN;
    }
    yStats.add(y);
  }
  /**
   * Adds the given statistics to the dataset, as if the individual values used to compute the
   * statistics had been added directly.
   */
  public void addAll(PairedStats values) {
    if (values.count() == 0) {
      return;
    }
    xStats.addAll(values.xStats());
    if (yStats.count() == 0) {
      sumOfProductsOfDeltas = values.sumOfProductsOfDeltas();
    } else {
      // This is a generalized version of the calculation in add(double, double) above. Note that
      // non-finite inputs will have sumOfProductsOfDeltas = NaN, so non-finite values will result
      // in NaN naturally.
      sumOfProductsOfDeltas +=
          values.sumOfProductsOfDeltas()
              + (values.xStats().mean() - xStats.mean())
                  * (values.yStats().mean() - yStats.mean())
                  * values.count();
    }
    yStats.addAll(values.yStats());
  }
  /** Returns an immutable snapshot of the current statistics. */
  public PairedStats snapshot() {
    return new PairedStats(xStats.snapshot(), yStats.snapshot(), sumOfProductsOfDeltas);
  }
  /** Returns the number of pairs in the dataset. */
  public long count() {
    return xStats.count();
  }
  /** Returns an immutable snapshot of the statistics on the {@code x} values alone. */
  public Stats xStats() {
    return xStats.snapshot();
  }
  /** Returns an immutable snapshot of the statistics on the {@code y} values alone. */
  public Stats yStats() {
    return yStats.snapshot();
  }
  /**
   * Returns the population covariance of the values. The count must be non-zero.
   *
   * <p>This is guaranteed to return zero if the dataset contains a single pair of finite values. It
   * is not guaranteed to return zero when the dataset consists of the same pair of values multiple
   * times, due to numerical errors.
   *
   * <h3>Non-finite values</h3>
   *
   * <p>If the dataset contains any non-finite values ({@link Double#POSITIVE_INFINITY}, {@link
   * Double#NEGATIVE_INFINITY}, or {@link Double#NaN}) then the result is {@link Double#NaN}.
   *
   * @throws IllegalStateException if the dataset is empty
   */
  public double populationCovariance() {
    checkState(count() != 0);
    return sumOfProductsOfDeltas / count();
  }
  /**
   * Returns the sample covariance of the values. The count must be greater than one.
   *
   * <p>This is not guaranteed to return zero when the dataset consists of the same pair of values
   * multiple times, due to numerical errors.
   *
   * <h3>Non-finite values</h3>
   *
   * <p>If the dataset contains any non-finite values ({@link Double#POSITIVE_INFINITY}, {@link
   * Double#NEGATIVE_INFINITY}, or {@link Double#NaN}) then the result is {@link Double#NaN}.
   *
   * @throws IllegalStateException if the dataset is empty or contains a single pair of values
   */
  public final double sampleCovariance() {
    checkState(count() > 1);
    return sumOfProductsOfDeltas / (count() - 1);
  }
  /**
   * Returns the <a href="http://mathworld.wolfram.com/CorrelationCoefficient.html">Pearson's or
   * product-moment correlation coefficient</a> of the values. The count must greater than one, and
   * the {@code x} and {@code y} values must both have non-zero population variance (i.e. {@code
   * xStats().populationVariance() > 0.0 && yStats().populationVariance() > 0.0}). The result is not
   * guaranteed to be exactly +/-1 even when the data are perfectly (anti-)correlated, due to
   * numerical errors. However, it is guaranteed to be in the inclusive range [-1, +1].
   *
   * <h3>Non-finite values</h3>
   *
   * <p>If the dataset contains any non-finite values ({@link Double#POSITIVE_INFINITY}, {@link
   * Double#NEGATIVE_INFINITY}, or {@link Double#NaN}) then the result is {@link Double#NaN}.
   *
   * @throws IllegalStateException if the dataset is empty or contains a single pair of values, or
   *     either the {@code x} and {@code y} dataset has zero population variance
   */
  public final double pearsonsCorrelationCoefficient() {
    checkState(count() > 1);
    if (isNaN(sumOfProductsOfDeltas)) {
      return NaN;
    }
    double xSumOfSquaresOfDeltas = xStats.sumOfSquaresOfDeltas();
    double ySumOfSquaresOfDeltas = yStats.sumOfSquaresOfDeltas();
    checkState(xSumOfSquaresOfDeltas > 0.0);
    checkState(ySumOfSquaresOfDeltas > 0.0);
    // The product of two positive numbers can be zero if the multiplication underflowed. We
    // force a positive value by effectively rounding up to MIN_VALUE.
    double productOfSumsOfSquaresOfDeltas =
        ensurePositive(xSumOfSquaresOfDeltas * ySumOfSquaresOfDeltas);
    return ensureInUnitRange(sumOfProductsOfDeltas / Math.sqrt(productOfSumsOfSquaresOfDeltas));
  }
  /**
   * Returns a linear transformation giving the best fit to the data according to <a
   * href="http://mathworld.wolfram.com/LeastSquaresFitting.html">Ordinary Least Squares linear
   * regression</a> of {@code y} as a function of {@code x}. The count must be greater than one, and
   * either the {@code x} or {@code y} data must have a non-zero population variance (i.e. {@code
   * xStats().populationVariance() > 0.0 || yStats().populationVariance() > 0.0}). The result is
   * guaranteed to be horizontal if there is variance in the {@code x} data but not the {@code y}
   * data, and vertical if there is variance in the {@code y} data but not the {@code x} data.
   *
   * <p>This fit minimizes the root-mean-square error in {@code y} as a function of {@code x}. This
   * error is defined as the square root of the mean of the squares of the differences between the
   * actual {@code y} values of the data and the values predicted by the fit for the {@code x}
   * values (i.e. it is the square root of the mean of the squares of the vertical distances between
   * the data points and the best fit line). For this fit, this error is a fraction {@code sqrt(1 -
   * R*R)} of the population standard deviation of {@code y}, where {@code R} is the Pearson's
   * correlation coefficient (as given by {@link #pearsonsCorrelationCoefficient()}).
   *
   * <p>The corresponding root-mean-square error in {@code x} as a function of {@code y} is a
   * fraction {@code sqrt(1/(R*R) - 1)} of the population standard deviation of {@code x}. This fit
   * does not normally minimize that error: to do that, you should swap the roles of {@code x} and
   * {@code y}.
   *
   * <h3>Non-finite values</h3>
   *
   * <p>If the dataset contains any non-finite values ({@link Double#POSITIVE_INFINITY}, {@link
   * Double#NEGATIVE_INFINITY}, or {@link Double#NaN}) then the result is {@link
   * LinearTransformation#forNaN()}.
   *
   * @throws IllegalStateException if the dataset is empty or contains a single pair of values, or
   *     both the {@code x} and {@code y} dataset have zero population variance
   */
  public final LinearTransformation leastSquaresFit() {
    checkState(count() > 1);
    if (isNaN(sumOfProductsOfDeltas)) {
      return LinearTransformation.forNaN();
    }
    double xSumOfSquaresOfDeltas = xStats.sumOfSquaresOfDeltas();
    if (xSumOfSquaresOfDeltas > 0.0) {
      if (yStats.sumOfSquaresOfDeltas() > 0.0) {
        return LinearTransformation.mapping(xStats.mean(), yStats.mean())
            .withSlope(sumOfProductsOfDeltas / xSumOfSquaresOfDeltas);
      } else {
        return LinearTransformation.horizontal(yStats.mean());
      }
    } else {
      checkState(yStats.sumOfSquaresOfDeltas() > 0.0);
      return LinearTransformation.vertical(xStats.mean());
    }
  }
  // Guards against underflow to exactly 0.0 so the subsequent division cannot
  // produce infinity; the caller comments on why this is safe.
  private static double ensurePositive(double value) {
    if (value > 0.0) {
      return value;
    } else {
      return Double.MIN_VALUE;
    }
  }
  // Clamps rounding noise so the correlation coefficient stays within [-1, +1].
  private static double ensureInUnitRange(double value) {
    return Doubles.constrainToRange(value, -1.0, 1.0);
  }
} | java | github | https://github.com/google/guava | android/guava/src/com/google/common/math/PairedStatsAccumulator.java
##########################################################################
#
# Copyright (c) 2019, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferTest
class MonitorAlgoTest( GafferTest.TestCase ) :
	"""Tests for Gaffer.MonitorAlgo.annotate()."""

	def testAnnotate( self ) :

		script = Gaffer.ScriptNode()
		script["b"] = Gaffer.Box()
		script["b"]["n1"] = GafferTest.AddNode()
		script["b"]["n1"]["op1"].setValue( 10024 )
		script["b"]["n2"] = GafferTest.AddNode()
		script["b"]["n2"]["op1"].setValue( 10023 )

		# Capture statistics while evaluating both nodes, with "n1"
		# additionally evaluated on a second frame (so it hashes twice
		# but, with identical results cached, computes only once).
		with Gaffer.PerformanceMonitor() as monitor :
			with Gaffer.Context() as context :
				script["b"]["n1"]["sum"].getValue()
				script["b"]["n2"]["sum"].getValue()
				context.setFrame( 2 )
				script["b"]["n1"]["sum"].getValue()

		Gaffer.MonitorAlgo.annotate( script, monitor, Gaffer.MonitorAlgo.PerformanceMetric.ComputeCount )
		for node, expected in [
			( script["b"]["n1"], "Compute count : 1" ),
			( script["b"]["n2"], "Compute count : 1" ),
			( script["b"], "Compute count : 2" ),
		] :
			self.assertEqual(
				Gaffer.Metadata.value( node, "annotation:performanceMonitor:computeCount:text" ),
				expected
			)

		Gaffer.MonitorAlgo.annotate( script, monitor, Gaffer.MonitorAlgo.PerformanceMetric.HashesPerCompute )
		for node, expected in [
			( script["b"]["n1"], "Hashes per compute : 2" ),
			( script["b"]["n2"], "Hashes per compute : 1" ),
			( script["b"], "Hashes per compute : 1.5" ),
		] :
			self.assertEqual(
				Gaffer.Metadata.value( node, "annotation:performanceMonitor:hashesPerCompute:text" ),
				expected
			)
# Allow running this test module directly.
if __name__ == "__main__":
	unittest.main() | unknown | codeparrot/codeparrot-clean | |
from itertools import groupby
from pkg_resources import iter_entry_points
from httpie.plugins import AuthPlugin, FormatterPlugin, ConverterPlugin
from httpie.plugins.base import TransportPlugin
# setuptools entry-point group names scanned for installed third-party
# plugins, one group per versioned plugin interface.
ENTRY_POINT_NAMES = [
    'httpie.plugins.auth.v1',
    'httpie.plugins.formatter.v1',
    'httpie.plugins.converter.v1',
    'httpie.plugins.transport.v1',
]
class PluginManager(object):
    """Mutable registry of plugin *classes*, with typed accessors for the
    auth, formatter, converter and transport plugin families."""

    def __init__(self):
        self._plugins = []

    def __iter__(self):
        return iter(self._plugins)

    def register(self, *plugins):
        """Append one or more plugin classes to the registry."""
        for plugin in plugins:
            self._plugins.append(plugin)

    def load_installed_plugins(self):
        """Discover plugins installed as setuptools entry points and register them."""
        for entry_point_name in ENTRY_POINT_NAMES:
            for entry_point in iter_entry_points(entry_point_name):
                plugin = entry_point.load()
                plugin.package_name = entry_point.dist.key
                # Register the object we just loaded and tagged. Previously the
                # entry point was loaded a second time here, registering a
                # plugin object whose ``package_name`` was never set.
                self.register(plugin)

    # Auth
    def get_auth_plugins(self):
        return [plugin for plugin in self if issubclass(plugin, AuthPlugin)]

    def get_auth_plugin_mapping(self):
        return dict((plugin.auth_type, plugin)
                    for plugin in self.get_auth_plugins())

    def get_auth_plugin(self, auth_type):
        return self.get_auth_plugin_mapping()[auth_type]

    # Output processing
    def get_formatters(self):
        return [plugin for plugin in self
                if issubclass(plugin, FormatterPlugin)]

    def get_formatters_grouped(self):
        """Return formatter plugins keyed by ``group_name`` ('format' default).

        Accumulates into a dict rather than using ``itertools.groupby``:
        ``groupby`` only merges *adjacent* equal keys, so formatters
        registered in non-contiguous order used to lose earlier group
        members.
        """
        groups = {}
        for plugin in self.get_formatters():
            group_name = getattr(plugin, 'group_name', 'format')
            groups.setdefault(group_name, []).append(plugin)
        return groups

    def get_converters(self):
        return [plugin for plugin in self
                if issubclass(plugin, ConverterPlugin)]

    # Adapters
    def get_transport_plugins(self):
        return [plugin for plugin in self
                if issubclass(plugin, TransportPlugin)]
import unittest
import tkp.db
from tkp.testutil import db_subs
class TestAugmentedRunningcatalog(unittest.TestCase):
    """Exercises the ``augmented_runningcatalog`` database view."""
    def setUp(self):
        """
        create a fake transient. Taken from the transient test.
        :return:
        """
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(data={'description':
                                                "Augmented Runningcatalog test"},
                                      database=self.database)
        self.n_images = 4
        self.new_source_sigma_margin = 3
        image_rms = 1e-3
        detection_thresh = 10
        self.search_params = {'eta_min': 1, 'v_min': 0.1}
        # Flux just above threshold vs. comfortably above the new-source margin.
        self.barely_detectable_flux = 1.01 * image_rms * detection_thresh
        self.reliably_detectable_flux = 1.01 * image_rms * (detection_thresh +
                                                            self.new_source_sigma_margin)
        # 1mJy image RMS, 10-sigma detection threshold = 10mJy threshold.
        test_specific_img_params = {'rms_qc': image_rms, 'rms_min': image_rms,
                                    'rms_max': image_rms,
                                    'detection_thresh': detection_thresh}
        self.im_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        im_params = self.im_params
        src_tuple = db_subs.example_extractedsource_tuple(ra=im_params[0]['centre_ra'],
                                                          dec=im_params[0]['centre_decl'],)
        # Source only appears in the third image, so it is flagged transient.
        transient_src = db_subs.MockSource(
            template_extractedsource=src_tuple,
            lightcurve={im_params[2]['taustart_ts']:
                            self.reliably_detectable_flux}
        )
        for img_pars in im_params:
            db_subs.insert_image_and_simulated_sources(self.dataset, img_pars,
                                                       [transient_src],
                                                       self.new_source_sigma_margin)
    def tearDown(self):
        # Discard all test data written during the test.
        tkp.db.rollback()
    def test_extra_columns(self):
        """The view must expose the variability/lightcurve summary columns."""
        query = """
        SELECT
          v_int, eta_int,
          sigma_rms_max, sigma_rms_min,
          lightcurve_max, lightcurve_avg
        FROM
          augmented_runningcatalog
        WHERE
          dataset = %s
        ORDER BY
          id
        """ % self.dataset.id
        cursor = tkp.db.execute(query)
        rows = cursor.fetchall()
        self.assertEqual(len(rows), 1)
        v_int, eta_int, sigma_max, sigma_min, lightcurve_max, lightcurve_avg = rows[0]
        # Expected values follow from the single-epoch simulated lightcurve
        # inserted in setUp (flux 13.13 mJy against 1 mJy RMS).
        self.assertAlmostEqual(v_int, 1.41421356237309)
        self.assertAlmostEqual(eta_int, 344.7938)
        self.assertAlmostEqual(sigma_max, 13.13)
        self.assertAlmostEqual(sigma_min, 13.13)
        self.assertAlmostEqual(lightcurve_max, 0.01313)
        self.assertAlmostEqual(lightcurve_avg, 0.006565)
    @unittest.skip(
        """
        This test fails when we mix the old "augmented runningcatalog" and
        the new SQLAlchemy code. It's unclear why it's suddenly borked, but
        since the relevant query is about to be reimplemented we skip it for
        now and will debug the new version.
        """)
    def test_count(self):
        """
        make sure the augmented view has a reasonable number of rows.
        """
        n_runcats_qry = "select count(id) from runningcatalog"
        n_runcat_flux_qry = "select count(id) from runningcatalog_flux"
        n_in_view_qry = "select count(id) from augmented_runningcatalog"
        n_runcats = tkp.db.execute(n_runcats_qry).fetchall()[0][0]
        n_runcat_flux = tkp.db.execute(n_runcat_flux_qry).fetchall()[0][0]
        n_in_view = tkp.db.execute(n_in_view_qry).fetchall()[0][0]
        self.assertGreaterEqual(n_in_view, n_runcats)
        self.assertGreaterEqual(n_runcat_flux, n_in_view) | unknown | codeparrot/codeparrot-clean | |
# frozen_string_literal: false
$INCFLAGS << " -I$(topdir) -I$(top_srcdir)"

# Collect every source file in this directory and derive an init-function
# name from each one (init.c itself is excluded), exposing the list to the
# preprocessor through the TEST_INIT_FUNCS(X) macro.
pattern = File.join($srcdir, "*.{#{SRC_EXT.join(%q{,})}}")
$srcs = Dir[pattern]
names = $srcs.map { |src| File.basename(src, ".*") }
names.delete("init")
macro_body = names.map { |name| "X(#{name})" }.join(' ')
$defs << "-DTEST_INIT_FUNCS(X)=\"#{macro_body}\""
create_makefile("-test-/rational")
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# 추론을 위한 Pipeline[[pipelines-for-inference]]
[`pipeline`]을 사용하면 언어, 컴퓨터 비전, 오디오 및 멀티모달 태스크에 대한 추론을 위해 [Hub](https://huggingface.co/models)의 어떤 모델이든 쉽게 사용할 수 있습니다. 특정 분야에 대한 경험이 없거나, 모델을 이루는 코드가 익숙하지 않은 경우에도 [`pipeline`]을 사용해서 추론할 수 있어요! 이 튜토리얼에서는 다음을 배워보겠습니다.
* 추론을 위해 [`pipeline`]을 사용하는 방법
* 특정 토크나이저 또는 모델을 사용하는 방법
* 언어, 컴퓨터 비전, 오디오 및 멀티모달 태스크에서 [`pipeline`]을 사용하는 방법
<Tip>
지원하는 모든 태스크와 쓸 수 있는 매개변수를 담은 목록은 [`pipeline`] 설명서를 참고해주세요.
</Tip>
## Pipeline 사용하기[[pipeline-usage]]
각 태스크마다 고유의 [`pipeline`]이 있지만, 개별 파이프라인을 담고있는 추상화된 [`pipeline`]를 사용하는 것이 일반적으로 더 간단합니다. [`pipeline`]은 태스크에 알맞게 추론이 가능한 기본 모델과 전처리 클래스를 자동으로 로드합니다.
1. 먼저 [`pipeline`]을 생성하고 태스크를 지정하세요.
```py
>>> from transformers import pipeline
>>> generator = pipeline(task="automatic-speech-recognition")
```
2. 그리고 [`pipeline`]에 입력을 넣어주세요.
```py
>>> generator("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'}
```
기대했던 결과가 아닌가요? Hub에서 [가장 많이 다운로드된 자동 음성 인식 모델](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads)로 더 나은 결과를 얻을 수 있는지 확인해보세요.
다음은 [openai/whisper-large](https://huggingface.co/openai/whisper-large)로 시도해보겠습니다.
```py
>>> generator = pipeline(model="openai/whisper-large")
>>> generator("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```
훨씬 더 나아졌군요!
Hub의 모델들은 여러 다양한 언어와 전문분야를 아우르기 때문에 꼭 자신의 언어나 분야에 특화된 모델을 찾아보시기 바랍니다.
브라우저를 벗어날 필요없이 Hub에서 직접 모델의 출력을 확인하고 다른 모델과 비교해서 자신의 상황에 더 적합한지, 애매한 입력을 더 잘 처리하는지도 확인할 수 있습니다.
만약 상황에 알맞은 모델이 없다면 언제나 직접 [훈련](training)시킬 수 있습니다!
입력이 여러 개 있는 경우, 리스트 형태로 전달할 수 있습니다.
```py
generator(
[
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac",
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
]
)
```
전체 데이터세트를 순회하거나 웹서버에 올려두어 추론에 사용하고 싶다면, 각 상세 페이지를 참조하세요.
[데이터세트에서 Pipeline 사용하기](#using-pipelines-on-a-dataset)
[웹서버에서 Pipeline 사용하기](./pipeline_webserver)
## 매개변수[[parameters]]
[`pipeline`]은 많은 매개변수를 지원합니다. 특정 태스크용인 것도 있고, 범용인 것도 있습니다.
일반적으로 원하는 위치에 어디든 매개변수를 넣을 수 있습니다.
```py
generator = pipeline(model="openai/whisper-large", my_parameter=1)
out = generator(...)  # This will use `my_parameter=1`.
out = generator(..., my_parameter=2)  # This will override and use `my_parameter=2`.
out = generator(...)  # This will go back to using `my_parameter=1`.
```
중요한 3가지 매개변수를 살펴보겠습니다.
### 기기(device)[[device]]
`device=n`처럼 기기를 지정하면 파이프라인이 자동으로 해당 기기에 모델을 배치합니다.
파이토치에서나 텐서플로우에서도 모두 작동합니다.
```py
generator = pipeline(model="openai/whisper-large", device=0)
```
모델이 GPU 하나에 돌아가기 버겁다면, `device_map="auto"`를 지정해서 🤗 [Accelerate](https://huggingface.co/docs/accelerate)가 모델 가중치를 어떻게 로드하고 저장할지 자동으로 결정하도록 할 수 있습니다.
```py
#!pip install accelerate
generator = pipeline(model="openai/whisper-large", device_map="auto")
```
### 배치 사이즈[[batch-size]]
기본적으로 파이프라인은 [여기](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching)에 나온 이유로 추론을 일괄 처리하지 않습니다. 간단히 설명하자면 일괄 처리가 반드시 더 빠르지 않고 오히려 더 느려질 수도 있기 때문입니다.
하지만 자신의 상황에 적합하다면, 이렇게 사용하세요.
```py
generator = pipeline(model="openai/whisper-large", device=0, batch_size=2)
audio_filenames = [f"audio_{i}.flac" for i in range(10)]
texts = generator(audio_filenames)
```
파이프라인은 위에서 제공된 10개의 오디오 파일을 추가 코드 없이 (일괄 처리가 보다 효과적인 GPU 위의) 모델에 2개씩 전달합니다.
출력은 일괄 처리하지 않았을 때와 똑같아야 합니다. 파이프라인에서 속도를 더 낼 수도 있는 방법 중 하나일 뿐입니다.
파이프라인은 일괄 처리의 복잡한 부분을 줄여주기도 합니다. (예를 들어 긴 오디오 파일처럼) 여러 부분으로 나눠야 모델이 처리할 수 있는 것을 [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching)이라고 하는데, 파이프라인을 사용하면 자동으로 나눠줍니다.
### 특정 태스크용 매개변수[[task-specific-parameters]]
각 태스크마다 구현할 때 유연성과 옵션을 제공하기 위해 태스크용 매개변수가 있습니다.
예를 들어 [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] 메서드에는 동영상의 자막을 넣을 때 유용할 것 같은 `return_timestamps` 매개변수가 있습니다.
```py
>>> # Not using whisper, as it cannot provide timestamps.
>>> generator = pipeline(model="facebook/wav2vec2-large-960h-lv60-self", return_timestamps="word")
>>> generator("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP AND LIVE OUT THE TRUE MEANING OF ITS CREED', 'chunks': [{'text': 'I', 'timestamp': (1.22, 1.24)}, {'text': 'HAVE', 'timestamp': (1.42, 1.58)}, {'text': 'A', 'timestamp': (1.66, 1.68)}, {'text': 'DREAM', 'timestamp': (1.76, 2.14)}, {'text': 'BUT', 'timestamp': (3.68, 3.8)}, {'text': 'ONE', 'timestamp': (3.94, 4.06)}, {'text': 'DAY', 'timestamp': (4.16, 4.3)}, {'text': 'THIS', 'timestamp': (6.36, 6.54)}, {'text': 'NATION', 'timestamp': (6.68, 7.1)}, {'text': 'WILL', 'timestamp': (7.32, 7.56)}, {'text': 'RISE', 'timestamp': (7.8, 8.26)}, {'text': 'UP', 'timestamp': (8.38, 8.48)}, {'text': 'AND', 'timestamp': (10.08, 10.18)}, {'text': 'LIVE', 'timestamp': (10.26, 10.48)}, {'text': 'OUT', 'timestamp': (10.58, 10.7)}, {'text': 'THE', 'timestamp': (10.82, 10.9)}, {'text': 'TRUE', 'timestamp': (10.98, 11.18)}, {'text': 'MEANING', 'timestamp': (11.26, 11.58)}, {'text': 'OF', 'timestamp': (11.66, 11.7)}, {'text': 'ITS', 'timestamp': (11.76, 11.88)}, {'text': 'CREED', 'timestamp': (12.0, 12.38)}]}
```
보시다시피 모델이 텍스트를 추론할 뿐만 아니라 각 단어를 말한 시점까지도 출력했습니다.
태스크마다 다양한 매개변수를 가지고 있는데요. 원하는 태스크의 API를 참조해서 바꿔볼 수 있는 여러 매개변수를 살펴보세요!
지금까지 다뤄본 [`~transformers.AutomaticSpeechRecognitionPipeline`]에는 `chunk_length_s` 매개변수가 있습니다. 영화나 1시간 분량의 동영상의 자막 작업을 할 때처럼, 일반적으로 모델이 자체적으로 처리할 수 없는 매우 긴 오디오 파일을 처리할 때 유용하죠.
도움이 될 만한 매개변수를 찾지 못했다면 언제든지 [요청](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)해주세요!
## 데이터세트에서 Pipeline 사용하기[[using-pipelines-on-a-dataset]]
파이프라인은 대규모 데이터세트에서도 추론 작업을 할 수 있습니다. 이때 이터레이터를 사용하는 걸 추천드립니다.
```py
def data():
for i in range(1000):
yield f"My example {i}"
pipe = pipeline(model="openai-community/gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
generated_characters += len(out["generated_text"])
```
이터레이터 `data()`는 각 결과를 호출마다 생성하고, 파이프라인은 입력이 순회할 수 있는 자료구조임을 자동으로 인식하여 GPU에서 기존 데이터가 처리되는 동안 새로운 데이터를 가져오기 시작합니다.(이때 내부적으로 [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)를 사용해요.) 이 과정은 전체 데이터세트를 메모리에 적재하지 않고도 GPU에 최대한 빠르게 새로운 작업을 공급할 수 있기 때문에 중요합니다.
그리고 일괄 처리가 더 빠를 수 있기 때문에, `batch_size` 매개변수를 조정해봐도 좋아요.
데이터세트를 순회하는 가장 간단한 방법은 🤗 [Datasets](https://github.com/huggingface/datasets/)를 활용하는 것인데요.
```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset

pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")
for out in pipe(KeyDataset(dataset, "audio")):
print(out)
```
## 웹서버에서 Pipeline 사용하기[[using-pipelines-for-a-webserver]]
<Tip>
추론 엔진을 만드는 과정은 따로 페이지를 작성할만한 복잡한 주제입니다.
</Tip>
[Link](./pipeline_webserver)
## 비전 Pipeline[[vision-pipeline]]
비전 태스크를 위해 [`pipeline`]을 사용하는 일은 거의 동일합니다.
태스크를 지정하고 이미지를 분류기에 전달하면 됩니다. 이미지는 인터넷 링크 또는 로컬 경로의 형태로 전달해주세요. 예를 들어 아래에 표시된 고양이는 어떤 종인가요?

```py
>>> from transformers import pipeline
>>> vision_classifier = pipeline(model="google/vit-base-patch16-224")
>>> preds = vision_classifier(
... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}]
```
## 텍스트 Pipeline[[text-pipeline]]
NLP 태스크를 위해 [`pipeline`]을 사용하는 일도 거의 동일합니다.
```py
>>> from transformers import pipeline
>>> # This model is a `zero-shot-classification` model.
>>> # It will classify text, except you are free to choose any label you might imagine
>>> classifier = pipeline(model="facebook/bart-large-mnli")
>>> classifier(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
```
## 멀티모달 Pipeline[[multimodal-pipeline]]
[`pipeline`]은 여러 모달리티(역주: 오디오, 비디오, 텍스트와 같은 데이터 형태)를 지원합니다. 예시로 시각적 질의응답(VQA; Visual Question Answering) 태스크는 텍스트와 이미지를 모두 사용합니다. 그 어떤 이미지 링크나 묻고 싶은 질문도 자유롭게 전달할 수 있습니다. 이미지는 URL 또는 로컬 경로의 형태로 전달해주세요.
예를 들어 이 [거래명세서 사진](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png)에서 거래명세서 번호를 묻고 싶다면,
```py
>>> from transformers import pipeline
>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> vqa(
... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
... question="What is the invoice number?",
... )
[{'score': 0.42514941096305847, 'answer': 'us-001', 'start': 16, 'end': 16}]
``` | unknown | github | https://github.com/huggingface/transformers | docs/source/ko/pipeline_tutorial.md |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid import AnalysisDataService
from mantid.api import AlgorithmPropertyWithValue
from mantid.simpleapi import SumSpectra, ConvertAxesToRealSpace
from sans.algorithm_detail.batch_execution import provide_loaded_data, create_unmanaged_algorithm, add_to_group
from sans.algorithm_detail.crop_helper import get_component_name
from sans.algorithm_detail.mask_sans_workspace import mask_workspace
from sans.common.constants import EMPTY_NAME
from sans.common.enums import IntegralEnum, DetectorType, SANSDataType
from sans.common.file_information import get_instrument_paths_for_sans_file
from sans.common.general_functions import parse_diagnostic_settings
from sans.common.xml_parsing import get_named_elements_from_ipf_file
from sans.gui_logic.models.RowEntries import RowEntries
from sans.gui_logic.plotting import get_plotting_module
from sans.gui_logic.presenter.gui_state_director import GuiStateDirector
def run_integral(integral_ranges, mask, integral, detector, state):
    """Integrate the loaded scatter workspaces over the requested ranges.

    Each loaded workspace is cropped to the chosen detector bank, optionally
    masked, integrated over every parsed range and plotted.

    :param integral_ranges: range specification string (may be empty).
    :param mask: whether to apply the mask defined in ``state``.
    :param integral: an IntegralEnum member selecting the direction.
    :param detector: a DetectorType member selecting the bank.
    :param state: the SANS state describing the reduction.
    :return: list of output workspaces, one per input workspace.
    """
    ranges = parse_range(integral_ranges)
    workspaces = load_workspace(state)
    multi_range = len(ranges) > 1

    results = []
    for workspace in workspaces:
        workspace_name = workspace.name()
        if multi_range:
            # Clear any stale group from a previous run of this diagnostic.
            AnalysisDataService.remove(workspace_name + '_ranges')
        workspace = crop_workspace(detector.value, workspace)
        if mask:
            workspace = apply_mask(state, workspace, detector.value)
        x_dim, y_dim = get_detector_size_from_sans_file(state, detector)
        integrated = integrate_ranges(
            ranges, integral, mask, detector, workspace_name, workspace,
            x_dim, y_dim, multi_range)
        plot_graph(integrated)
        results.append(integrated)
    return results
def integrate_ranges(ranges, integral, mask, detector, input_workspace_name,
                     input_workspace, x_dim, y_dim, is_multi_range):
    """Run the integration algorithm once per range.

    With several ranges each output is collected into an
    ``<input>_ranges`` group workspace, which is returned; with a single
    range the lone output workspace is returned directly.
    """
    group_name = input_workspace_name + '_ranges'
    for current_range in ranges:
        ws_name = generate_output_workspace_name(
            current_range, integral, mask, detector, input_workspace_name)
        output_workspace = run_algorithm(
            input_workspace, current_range, integral, ws_name, x_dim, y_dim)
        if is_multi_range:
            add_to_group(output_workspace, group_name)
    if is_multi_range:
        return AnalysisDataService.retrieve(group_name)
    return output_workspace
def parse_range(range):
    """Parse a diagnostic range string into a list of [min, max] pairs.

    An empty/absent specification yields a single range starting at 0 whose
    upper bound is the algorithm-property "empty int" marker (i.e. open-ended).
    """
    if not range:
        return [[0, AlgorithmPropertyWithValue.EMPTY_INT]]
    return parse_diagnostic_settings(range)
def load_workspace(state):
    """Load the data described by ``state`` and return the sample-scatter
    workspaces (monitors are loaded but not used here)."""
    workspace_names = {
        SANSDataType.SAMPLE_SCATTER: "SampleScatterWorkspace",
        SANSDataType.SAMPLE_TRANSMISSION: "SampleTransmissionWorkspace",
        SANSDataType.SAMPLE_DIRECT: "SampleDirectWorkspace",
        SANSDataType.CAN_SCATTER: "CanScatterWorkspace",
        SANSDataType.CAN_TRANSMISSION: "CanTransmissionWorkspace",
        SANSDataType.CAN_DIRECT: "CanDirectWorkspace",
    }
    monitor_names = {
        SANSDataType.SAMPLE_SCATTER: "SampleScatterMonitorWorkspace",
        SANSDataType.CAN_SCATTER: "CanScatterMonitorWorkspace",
    }
    workspaces, _monitors = provide_loaded_data(state, False, workspace_names,
                                                monitor_names)
    return workspaces[SANSDataType.SAMPLE_SCATTER]
def crop_workspace(component, workspace):
    """Crop ``workspace`` down to the detector bank named by ``component``.

    :param component: a DetectorType value string identifying the bank.
    :param workspace: the workspace to crop.
    :return: the cropped workspace.
    """
    bank_name = get_component_name(workspace, DetectorType(component))
    options = {"InputWorkspace": workspace,
               "OutputWorkspace": EMPTY_NAME,
               "ComponentNames": bank_name}
    alg = create_unmanaged_algorithm("CropToComponent", **options)
    alg.execute()
    return alg.getProperty("OutputWorkspace").value
def run_algorithm(input_workspace, range, integral, output_workspace, x_dim, y_dim):
    """Sum the spectra of ``input_workspace`` between the bounds of ``range``.

    For horizontal/vertical integrals the axes are first mapped to real
    space (so workspace indices correspond to detector rows or columns);
    a time integral sums the raw spectra directly.
    """
    lower, upper = range[0], range[1]
    # Axis assignment (vertical, horizontal) differs between the two
    # spatial directions; the conversion/sum sequence is otherwise shared.
    axis_table = {IntegralEnum.Horizontal: ('x', 'y'),
                  IntegralEnum.Vertical: ('y', 'x')}
    if integral in axis_table:
        vertical_axis, horizontal_axis = axis_table[integral]
        output_workspace = ConvertAxesToRealSpace(
            InputWorkspace=input_workspace, OutputWorkspace=output_workspace,
            VerticalAxis=vertical_axis, HorizontalAxis=horizontal_axis,
            NumberVerticalBins=int(x_dim), NumberHorizontalBins=int(y_dim))
        output_workspace = SumSpectra(
            InputWorkspace=output_workspace, OutputWorkspace=output_workspace,
            StartWorkspaceIndex=lower, EndWorkspaceIndex=upper)
    elif integral == IntegralEnum.Time:
        output_workspace = SumSpectra(
            InputWorkspace=input_workspace, OutputWorkspace=output_workspace,
            StartWorkspaceIndex=lower, EndWorkspaceIndex=upper)
    return output_workspace
def generate_output_workspace_name(range, integral, mask, detector, input_workspace_name):
    """Build a descriptive output-workspace name for one integration run,
    encoding the run, range, direction, detector and mask flag."""
    return 'Run:{}, Range:{}, Direction:{}, Detector:{}, Mask:{}'.format(
        input_workspace_name, range, integral.value, detector.value, mask)
def plot_graph(workspace):
    """Plot the first spectrum of ``workspace`` using whichever plotting
    backend is available (legacy ``plotSpectrum`` or workbench ``plot``)."""
    plotting = get_plotting_module()
    if hasattr(plotting, 'plotSpectrum'):
        return plotting.plotSpectrum(workspace, 0)
    if hasattr(plotting, 'plot'):
        # The workbench plot() API expects a list of workspaces.
        workspaces = workspace if isinstance(workspace, list) else [workspace]
        plotting.plot(workspaces, wksp_indices=[0])
def apply_mask(state, workspace, component):
    """Apply the mask defined in ``state`` to ``workspace`` for the given
    detector component and return the masked workspace."""
    return mask_workspace(component_as_string=component,
                          workspace=workspace, state=state)
def get_detector_size_from_sans_file(state, detector):
    """Look up the detector dimensions from the instrument parameter file.

    :param state: a SANS state object; its sample-scatter file determines
        which instrument parameter file is consulted.
    :param detector: the DetectorType whose dimensions are required.
    :return: tuple ``(x_dim, y_dim)`` — number of columns and rows (floats).
    """
    instrument_file = get_instrument_paths_for_sans_file(state.data.sample_scatter)
    ipf_path = instrument_file[1]
    # HAB and LAB entries share the same parameter layout; only the key
    # prefix differs, so the duplicated lookups are folded into a helper.
    prefix = ("high-angle-detector" if detector == DetectorType.HAB
              else "low-angle-detector")

    def _lookup(suffix):
        # Fetch a single named float value from the IPF file.
        key = "{}-{}".format(prefix, suffix)
        return get_named_elements_from_ipf_file(ipf_path, key, float)[key]

    x_dim = _lookup("num-columns")
    y_dim = _lookup("num-rows")
    return x_dim, y_dim
def create_state(state_model_with_view_update, file, period, facility):
    """Build a SANS state for a single scatter file/period through the GUI
    state director and return the combined ``all_states`` object."""
    row = RowEntries(sample_scatter=file, sample_scatter_period=period)
    director = GuiStateDirector(state_model_with_view_update, facility)
    return director.create_state(row).all_states
# -*- coding: utf-8 -*-
import os
import re
import uuid
import urllib
import logging
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from HTMLParser import HTMLParser
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, settings, security
from website.util import web_url_for
from website.util import api_url_for
from website.exceptions import (
NodeStateError, InvalidRetractionApprovalToken,
InvalidRetractionDisapprovalToken, InvalidEmbargoApprovalToken,
InvalidEmbargoDisapprovalToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.util.permissions import DEFAULT_CONTRIBUTOR_PERMISSIONS
from website.project import signals as project_signals
html_parser = HTMLParser()
logger = logging.getLogger(__name__)
def has_anonymous_link(node, auth):
    """Check whether the node is being viewed through an anonymous
    view-only link.

    :param Node node: Node which the user wants to visit
    :param Auth auth: current authorization context (may carry a private key)
    :return bool: whether the node is anonymous to the user or not
    """
    # Prefer the key on the auth object; fall back to the request query arg.
    key = auth.private_key or request.args.get('view_only', '').strip('/')
    if not key:
        return False
    # Public nodes are never anonymized.
    if node.is_public:
        return False
    matching = [link for link in node.private_links_active if link.key == key]
    return any(link.anonymous for link in matching)
class MetaSchema(StoredObject):
    """A named, versioned meta-data schema (e.g. registration forms),
    imported from JSON by :func:`ensure_schemas`."""
    _id = fields.StringField(default=lambda: str(ObjectId()))
    # Schema name; spaces are replaced with underscores on import.
    name = fields.StringField()
    # The raw schema definition (pages, questions, etc.).
    schema = fields.DictionaryField()
    category = fields.StringField()
    # Version of the Knockout metadata renderer to use (e.g. if data binds
    # change)
    metadata_version = fields.IntegerField()
    # Version of the schema to use (e.g. if questions, responses change)
    schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
    """Import meta-data schemas from JSON to database, optionally clearing
    database first.

    :param clear: Clear schema database before import
    """
    if clear:
        try:
            MetaSchema.remove()
        except AttributeError:
            # Collection may not exist yet; only tolerate this in debug mode.
            if not settings.DEBUG_MODE:
                raise
    for schema in OSF_META_SCHEMAS:
        try:
            MetaSchema.find_one(
                Q('name', 'eq', schema['name']) &
                Q('schema_version', 'eq', schema['schema_version'])
            )
        except Exception:
            # Schema not found: create it. Previously a bare ``except:``
            # which also swallowed KeyboardInterrupt/SystemExit.
            schema['name'] = schema['name'].replace(' ', '_')
            schema_obj = MetaSchema(**schema)
            schema_obj.save()
class MetaData(GuidStoredObject):
    """Arbitrary dictionary meta-data attached to another stored object."""
    _id = fields.StringField(primary=True)
    # The object this meta-data describes (any model, via abstract FK).
    target = fields.AbstractForeignField(backref='metadata')
    data = fields.DictionaryField()
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
    """Validator for ``Comment.reports``: every key must be the ID of an
    existing user and every value a dict carrying both ``category`` and
    ``text``.

    :raises ValidationValueError: on a bad key or missing report fields
    :raises ValidationTypeError: when a value is not a dict
    """
    for user_id, report in value.iteritems():
        if not User.load(user_id):
            raise ValidationValueError('Keys must be user IDs')
        if not isinstance(report, dict):
            raise ValidationTypeError('Values must be dictionaries')
        if 'category' not in report or 'text' not in report:
            raise ValidationValueError(
                'Values must include `category` and `text` keys'
            )
class Comment(GuidStoredObject):
    """A user comment on a node. ``target`` may reference any commentable
    object (including another Comment, forming a thread). Deletion is a
    soft-delete flag so comments can be restored; abuse reports are stored
    per reporting user."""
    _id = fields.StringField(primary=True)
    user = fields.ForeignField('user', required=True, backref='commented')
    node = fields.ForeignField('node', required=True, backref='comment_owner')
    # Object being commented on; abstract so threads (comment-on-comment)
    # and other targets are possible.
    target = fields.AbstractForeignField(required=True, backref='commented')
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
    # True once the comment text has been edited at least once.
    modified = fields.BooleanField()
    # Soft-delete flag; see delete()/undelete().
    is_deleted = fields.BooleanField(default=False)
    content = fields.StringField()
    # Dictionary field mapping user IDs to dictionaries of report details:
    # {
    # 'icpnw': {'category': 'hate', 'message': 'offensive'},
    # 'cdi38': {'category': 'spam', 'message': 'godwins law'},
    # }
    reports = fields.DictionaryField(validate=validate_comment_reports)
    @classmethod
    def create(cls, auth, **kwargs):
        """Create and save a comment, logging COMMENT_ADDED on its node.

        :param Auth auth: authorization recorded on the log entry
        :param dict kwargs: field values for the new comment
        :return Comment: the saved comment
        """
        comment = cls(**kwargs)
        comment.save()
        comment.node.add_log(
            NodeLog.COMMENT_ADDED,
            {
                'project': comment.node.parent_id,
                'node': comment.node._id,
                'user': comment.user._id,
                'comment': comment._id,
            },
            auth=auth,
            save=False,
        )
        # Persist the log added above.
        comment.node.save()
        return comment
    def edit(self, content, auth, save=False):
        """Replace the comment text, mark it as modified and log the update."""
        self.content = content
        self.modified = True
        self.node.add_log(
            NodeLog.COMMENT_UPDATED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
    def delete(self, auth, save=False):
        """Soft-delete the comment and log the removal on its node."""
        self.is_deleted = True
        self.node.add_log(
            NodeLog.COMMENT_REMOVED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
    def undelete(self, auth, save=False):
        """Restore a soft-deleted comment, logging it as added again."""
        self.is_deleted = False
        self.node.add_log(
            NodeLog.COMMENT_ADDED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
    def report_abuse(self, user, save=False, **kwargs):
        """Report that a comment is abuse.
        :param User user: User submitting the report
        :param bool save: Save changes
        :param dict kwargs: Report details
        :raises: ValueError if the user submitting abuse is the same as the
        user who posted the comment
        """
        if user == self.user:
            raise ValueError
        self.reports[user._id] = kwargs
        if save:
            self.save()
    def unreport_abuse(self, user, save=False):
        """Revoke report of abuse.
        :param User user: User who submitted the report
        :param bool save: Save changes
        :raises: ValueError if user has not reported comment as abuse
        """
        try:
            self.reports.pop(user._id)
        except KeyError:
            raise ValueError('User has not reported comment as abuse')
        if save:
            self.save()
class ApiKey(StoredObject):
    """An API key that may be attached to a user or a node; the key string
    itself serves as the primary key."""
    # The key is also its primary key
    _id = fields.StringField(
        primary=True,
        default=lambda: str(ObjectId()) + str(uuid.uuid4())
    )
    # A display name
    label = fields.StringField()
    @property
    def user(self):
        # Owning user, if any, resolved through the 'keyed' back-reference.
        return self.user__keyed[0] if self.user__keyed else None
    @property
    def node(self):
        # Owning node, if any, resolved through the 'keyed' back-reference.
        return self.node__keyed[0] if self.node__keyed else None
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
    """A single entry in a node's activity log: who did what, when, and
    with which parameters."""
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
    # One of the action constants defined below.
    action = fields.StringField(index=True)
    # Action-specific details; usually includes 'node' and/or 'project' ids.
    params = fields.DictionaryField()
    should_hide = fields.BooleanField(default=False)
    was_connected_to = fields.ForeignField('node', list=True)
    user = fields.ForeignField('user', backref='created')
    api_key = fields.ForeignField('apikey', backref='created')
    # Identifier for actions performed by an external (non-OSF) user.
    foreign_user = fields.StringField()
    DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
    # Log action constants
    CREATED_FROM = 'created_from'
    PROJECT_CREATED = 'project_created'
    PROJECT_REGISTERED = 'project_registered'
    PROJECT_DELETED = 'project_deleted'
    NODE_CREATED = 'node_created'
    NODE_FORKED = 'node_forked'
    NODE_REMOVED = 'node_removed'
    POINTER_CREATED = 'pointer_created'
    POINTER_FORKED = 'pointer_forked'
    POINTER_REMOVED = 'pointer_removed'
    WIKI_UPDATED = 'wiki_updated'
    WIKI_DELETED = 'wiki_deleted'
    WIKI_RENAMED = 'wiki_renamed'
    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'
    PERMISSIONS_UPDATED = 'permissions_updated'
    MADE_PRIVATE = 'made_private'
    MADE_PUBLIC = 'made_public'
    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'
    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'
    UPDATED_FIELDS = 'updated_fields'
    FILE_MOVED = 'addon_file_moved'
    FILE_COPIED = 'addon_file_copied'
    FOLDER_CREATED = 'folder_created'
    FILE_ADDED = 'file_added'
    FILE_UPDATED = 'file_updated'
    FILE_REMOVED = 'file_removed'
    FILE_RESTORED = 'file_restored'
    ADDON_ADDED = 'addon_added'
    ADDON_REMOVED = 'addon_removed'
    COMMENT_ADDED = 'comment_added'
    COMMENT_REMOVED = 'comment_removed'
    COMMENT_UPDATED = 'comment_updated'
    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
    EXTERNAL_IDS_ADDED = 'external_ids_added'
    EMBARGO_APPROVED = 'embargo_approved'
    EMBARGO_CANCELLED = 'embargo_cancelled'
    EMBARGO_COMPLETED = 'embargo_completed'
    EMBARGO_INITIATED = 'embargo_initiated'
    RETRACTION_APPROVED = 'retraction_approved'
    RETRACTION_CANCELLED = 'retraction_cancelled'
    RETRACTION_INITIATED = 'retraction_initiated'
    def __repr__(self):
        return ('<NodeLog({self.action!r}, params={self.params!r}) '
                'with id {self._id!r}>').format(self=self)
    @property
    def node(self):
        """Return the :class:`Node` associated with this log."""
        return (
            Node.load(self.params.get('node')) or
            Node.load(self.params.get('project'))
        )
    @property
    def tz_date(self):
        '''Return the timezone-aware date.
        '''
        # Date should always be defined, but a few logs in production are
        # missing dates; return None and log error if date missing
        if self.date:
            return self.date.replace(tzinfo=pytz.UTC)
        logger.error('Date missing on NodeLog {}'.format(self._primary_key))
    @property
    def formatted_date(self):
        '''Return the timezone-aware, ISO-formatted string representation of
        this log's date.
        '''
        if self.tz_date:
            return self.tz_date.isoformat()
    def resolve_node(self, node):
        """A single `NodeLog` record may be attached to multiple `Node` records
        (parents, forks, registrations, etc.), so the node that the log refers
        to may not be the same as the node the user is viewing. Use
        `resolve_node` to determine the relevant node to use for permission
        checks.

        :param Node node: Node being viewed
        """
        if self.node == node or self.node in node.nodes:
            return self.node
        if node.is_fork_of(self.node) or node.is_registration_of(self.node):
            return node
        for child in node.nodes:
            # Bug fix: the second condition previously re-tested ``node``
            # (already known False from the branch above) instead of the
            # ``child`` being iterated.
            if child.is_fork_of(self.node) or child.is_registration_of(self.node):
                return child
        return False
    def can_view(self, node, auth):
        """Whether ``auth`` may view this log in the context of ``node``."""
        node_to_check = self.resolve_node(node)
        if node_to_check:
            return node_to_check.can_view(auth)
        return False
    def _render_log_contributor(self, contributor, anonymous=False):
        """Serialize one contributor id for log display, honoring anonymity."""
        user = User.load(contributor)
        if not user:
            return None
        if self.node:
            fullname = user.display_full_name(node=self.node)
        else:
            fullname = user.fullname
        return {
            'id': privacy_info_handle(user._primary_key, anonymous),
            'fullname': privacy_info_handle(fullname, anonymous, name=True),
            'registered': user.is_registered,
        }
class Tag(StoredObject):
    """A free-form label attachable to nodes; the tag text is the primary
    key (max 128 characters)."""
    _id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
    def __repr__(self):
        return '<Tag() with id {self._id!r}>'.format(self=self)
    @property
    def url(self):
        # A tag links to a search for everything carrying that tag.
        return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
    """A link to a Node. The Pointer delegates all but a few methods to its
    contained Node. Forking and registration are overridden such that the
    link is cloned, but its contained Node is not.
    """
    #: Whether this is a pointer or not
    primary = False
    _id = fields.StringField()
    node = fields.ForeignField('node', backref='_pointed')
    _meta = {'optimistic': True}
    def _clone(self):
        # Copy the pointer record itself; the pointed-at node is shared,
        # not copied. Implicitly returns None when no node is set.
        if self.node:
            clone = self.clone()
            clone.node = self.node
            clone.save()
            return clone
    def fork_node(self, *args, **kwargs):
        """Fork the link (clone this pointer), not the pointed-at node."""
        return self._clone()
    def register_node(self, *args, **kwargs):
        """Register the link (clone this pointer), not the pointed-at node."""
        return self._clone()
    def use_as_template(self, *args, **kwargs):
        """Template the link (clone this pointer), not the pointed-at node."""
        return self._clone()
    def resolve(self):
        """Return the Node this pointer refers to."""
        return self.node
    def __getattr__(self, item):
        """Delegate attribute access to the node being pointed to.
        """
        # Prevent backref lookups from being overriden by proxied node
        try:
            return super(Pointer, self).__getattr__(item)
        except AttributeError:
            pass
        if self.node:
            return getattr(self.node, item)
        raise AttributeError(
            'Pointer object has no attribute {0}'.format(
                item
            )
        )
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return its parent node."""
    # `Pointer.parent_node` would resolve against the pointed-at `Node`,
    # not the `Pointer` itself, so walk the back-reference instead.
    parents = pointer.node__parent
    assert len(parents) == 1, 'Pointer must have exactly one parent.'
    return parents[0]
def validate_category(value):
    """Validator for Node#category. Makes sure that the value is one of the
    categories defined in CATEGORY_MAP.
    """
    if value in Node.CATEGORY_MAP:
        return True
    raise ValidationValueError('Invalid value for category.')
def validate_title(value):
    """Validator for Node#title. Makes sure that the value exists and is not
    above 200 characters.
    """
    stripped = value.strip() if value is not None else ''
    if not stripped:
        raise ValidationValueError('Title cannot be blank.')
    if len(value) > 200:
        raise ValidationValueError('Title cannot exceed 200 characters.')
    return True
def validate_user(value):
    """Validator for Node#expanded: a non-empty dict must be keyed by the
    ID of an existing user."""
    if value == {}:
        return True
    user_id = value.iterkeys().next()
    if User.find(Q('_id', 'eq', user_id)).count() != 1:
        raise ValidationValueError('User does not exist.')
    return True
class NodeUpdateError(Exception):
    """Raised when an update to a Node field is rejected.

    :param str reason: human-readable explanation of the rejection
    :param str key: name of the field whose update failed
    """
    def __init__(self, reason, key, *args, **kwargs):
        super(NodeUpdateError, self).__init__(*args, **kwargs)
        self.reason = reason
        self.key = key
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
    """Storage-backed model for a project, component, folder, fork, or
    registration. Field definitions below describe persisted state.
    """
    #: Whether this is a pointer or not
    primary = True
    # Node fields that trigger an update to Solr on save
    SOLR_UPDATE_FIELDS = {
        'title',
        'category',
        'description',
        'visible_contributor_ids',
        'tags',
        'is_fork',
        'is_registration',
        'retraction',
        'embargo',
        'is_public',
        'is_deleted',
        'wiki_pages_current',
        'is_retracted',
    }
    # Maps category identifier => Human-readable representation for use in
    # titles, menus, etc.
    # Use an OrderedDict so that menu items show in the correct order
    CATEGORY_MAP = OrderedDict([
        ('', 'Uncategorized'),
        ('project', 'Project'),
        ('hypothesis', 'Hypothesis'),
        ('methods and measures', 'Methods and Measures'),
        ('procedure', 'Procedure'),
        ('instrumentation', 'Instrumentation'),
        ('data', 'Data'),
        ('analysis', 'Analysis'),
        ('communication', 'Communication'),
        ('other', 'Other'),
    ])
    # The only fields Node.update() is allowed to modify
    WRITABLE_WHITELIST = [
        'title',
        'description',
        'category',
    ]
    _id = fields.StringField(primary=True)
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
    # Privacy
    is_public = fields.BooleanField(default=False, index=True)
    # User mappings
    # permissions maps user id -> list of permission strings
    permissions = fields.DictionaryField()
    visible_contributor_ids = fields.StringField(list=True)
    # Project Organization
    is_dashboard = fields.BooleanField(default=False, index=True)
    is_folder = fields.BooleanField(default=False, index=True)
    # Expanded: Dictionary field mapping user IDs to expand state of this node:
    # {
    # 'icpnw': True,
    # 'cdi38': False,
    # }
    expanded = fields.DictionaryField(default={}, validate=validate_user)
    is_deleted = fields.BooleanField(default=False, index=True)
    deleted_date = fields.DateTimeField(index=True)
    is_registration = fields.BooleanField(default=False, index=True)
    registered_date = fields.DateTimeField(index=True)
    registered_user = fields.ForeignField('user', backref='registered')
    registered_schema = fields.ForeignField('metaschema', backref='registered')
    registered_meta = fields.DictionaryField()
    retraction = fields.ForeignField('retraction')
    embargo = fields.ForeignField('embargo')
    is_fork = fields.BooleanField(default=False, index=True)
    forked_date = fields.DateTimeField(index=True)
    title = fields.StringField(validate=validate_title)
    description = fields.StringField()
    category = fields.StringField(validate=validate_category, index=True)
    # One of 'public', 'private'
    # TODO: Add validator
    comment_level = fields.StringField(default='private')
    wiki_pages_current = fields.DictionaryField()
    wiki_pages_versions = fields.DictionaryField()
    # Dictionary field mapping node wiki page to sharejs private uuid.
    # {<page_name>: <sharejs_id>}
    wiki_private_uuids = fields.DictionaryField()
    file_guid_to_share_uuids = fields.DictionaryField()
    creator = fields.ForeignField('user', backref='created')
    contributors = fields.ForeignField('user', list=True, backref='contributed')
    users_watching_node = fields.ForeignField('user', list=True, backref='watched')
    logs = fields.ForeignField('nodelog', list=True, backref='logged')
    tags = fields.ForeignField('tag', list=True, backref='tagged')
    # Tags for internal use
    system_tags = fields.StringField(list=True)
    # Children: primary child nodes and Pointer objects intermixed
    nodes = fields.AbstractForeignField(list=True, backref='parent')
    forked_from = fields.ForeignField('node', backref='forked', index=True)
    registered_from = fields.ForeignField('node', backref='registrations', index=True)
    # The node (if any) used as a template for this node's creation
    template_node = fields.ForeignField('node', backref='template_node', index=True)
    api_keys = fields.ForeignField('apikey', list=True, backref='keyed')
    piwik_site_id = fields.StringField()
    # Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
    # {<User.id>: [<Node._id>, <Node2._id>, ...] }
    child_node_subscriptions = fields.DictionaryField(default=dict)
    _meta = {
        'optimistic': True,
    }
    def __init__(self, *args, **kwargs):
        """Initialize a Node; on first creation (not a database load), add
        the creator as a visible contributor with creator permissions.
        """
        super(Node, self).__init__(*args, **kwargs)
        # Records loaded from the database already have contributors set up.
        if kwargs.get('_is_loaded', False):
            return
        if self.creator:
            self.contributors.append(self.creator)
            self.set_visible(self.creator, visible=True, log=False)
            # Add default creator permissions
            for permission in CREATOR_PERMISSIONS:
                self.add_permission(self.creator, permission, save=False)
def __repr__(self):
return ('<Node(title={self.title!r}, category={self.category!r}) '
'with _id {self._id!r}>').format(self=self)
    # For Django compatibility
    @property
    def pk(self):
        """Alias for `_id` (Django-style primary key accessor)."""
        return self._id
    @property
    def category_display(self):
        """The human-readable representation of this node's category."""
        # Raises KeyError for values outside CATEGORY_MAP; `category` is
        # guarded by validate_category on save.
        return self.CATEGORY_MAP[self.category]
@property
def is_retracted(self):
if self.retraction is None and self.parent_node:
return self.parent_node.is_retracted
return getattr(self.retraction, 'is_retracted', False)
@property
def pending_retraction(self):
if self.retraction is None and self.parent_node:
return self.parent_node.pending_retraction
return getattr(self.retraction, 'pending_retraction', False)
@property
def embargo_end_date(self):
if self.embargo is None and self.parent_node:
return self.parent_node.embargo_end_date
return getattr(self.embargo, 'embargo_end_date', False)
@property
def pending_embargo(self):
if self.embargo is None and self.parent_node:
return self.parent_node.pending_embargo
return getattr(self.embargo, 'pending_embargo', False)
@property
def pending_registration(self):
if self.embargo is None and self.parent_node:
return self.parent_node.pending_registration
return getattr(self.embargo, 'pending_registration', False)
    @property
    def private_links(self):
        """All private links sharing this node (backref lookup)."""
        return self.privatelink__shared
@property
def private_links_active(self):
return [x for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_active(self):
return [x.key for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_deleted(self):
return [x.key for x in self.private_links if x.is_deleted]
def path_above(self, auth):
parents = self.parents
return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])
@property
def ids_above(self):
parents = self.parents
return {p._id for p in parents}
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
def active_contributors(self, include=lambda n: True):
for contrib in self.contributors:
if contrib.is_active and include(contrib):
yield contrib
def is_admin_parent(self, user):
if self.has_permission(user, 'admin', check_parent=False):
return True
if self.parent_node:
return self.parent_node.is_admin_parent(user)
return False
def can_view(self, auth):
if not auth and not self.is_public:
return False
return (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read')) or
auth.private_key in self.private_link_keys_active or
self.is_admin_parent(auth.user)
)
def is_expanded(self, user=None):
"""Return if a user is has expanded the folder in the dashboard view.
Must specify one of (`auth`, `user`).
:param User user: User object to check
:returns: Boolean if the folder is expanded.
"""
if user._id in self.expanded:
return self.expanded[user._id]
else:
return False
    def expand(self, user=None):
        """Record that `user` has expanded this folder, and persist."""
        self.expanded[user._id] = True
        self.save()
    def collapse(self, user=None):
        """Record that `user` has collapsed this folder, and persist."""
        self.expanded[user._id] = False
        self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
    def is_fork_of(self, other):
        """Return whether this node is a (possibly indirect) fork of `other`."""
        return self.is_derived_from(other, 'forked_from')
    def is_registration_of(self, other):
        """Return whether this node is a (possibly indirect) registration of
        `other`."""
        return self.is_derived_from(other, 'registered_from')
    @property
    def forks(self):
        """List of forks of this node"""
        # Backref query: live (non-deleted) forks, excluding registrations.
        return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
            Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param User user: User to grant permission to
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
def remove_permission(self, user, permission, save=False):
"""Revoke permission from a user.
:param User user: User to revoke permission from
:param str permission: Permission to revoke
:param bool save: Save changes
:raises: ValueError if user does not have permission
"""
try:
self.permissions[user._id].remove(permission)
except (KeyError, ValueError):
raise ValueError('User does not have permission {0}'.format(permission))
if save:
self.save()
def clear_permission(self, user, save=False):
"""Clear all permissions for a user.
:param User user: User to revoke permission from
:param bool save: Save changes
:raises: ValueError if user not in permissions
"""
try:
self.permissions.pop(user._id)
except KeyError:
raise ValueError(
'User {0} not in permissions list for node {1}'.format(
user._id, self._id,
)
)
if save:
self.save()
    def set_permissions(self, user, permissions, save=False):
        """Replace the user's permission list wholesale.

        :param User user: User whose permissions to set
        :param list permissions: New list of permission strings
        :param bool save: Save changes
        """
        self.permissions[user._id] = permissions
        if save:
            self.save()
def has_permission(self, user, permission, check_parent=True):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
if permission in self.permissions.get(user._id, []):
return True
if permission == 'read' and check_parent:
return self.is_admin_parent(user)
return False
def can_read_children(self, user):
"""Checks if the given user has read permissions on any child nodes
that are not registrations or deleted
"""
if self.has_permission(user, 'read'):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.can_read_children(user):
return True
return False
    def get_permissions(self, user):
        """Get list of permissions for user.

        :param User user: User to check
        :returns: List of permissions; empty list when the user has none
        """
        # Note: never raises -- unknown users simply get an empty list.
        return self.permissions.get(user._id, [])
def adjust_permissions(self):
for key in self.permissions.keys():
if key not in self.contributors:
self.permissions.pop(key)
@property
def visible_contributors(self):
return [
User.load(_id)
for _id in self.visible_contributor_ids
]
@property
def parents(self):
if self.parent_node:
return [self.parent_node] + self.parent_node.parents
return []
@property
def admin_contributor_ids(self, contributors=None):
contributor_ids = self.contributors._to_primary_keys()
admin_ids = set()
for parent in self.parents:
admins = [
user for user, perms in parent.permissions.iteritems()
if 'admin' in perms
]
admin_ids.update(set(admins).difference(contributor_ids))
return admin_ids
@property
def admin_contributors(self):
return sorted(
[User.load(_id) for _id in self.admin_contributor_ids],
key=lambda user: user.family_name,
)
    def get_visible(self, user):
        """Return whether `user` is a visible contributor.

        :param User user: Contributor to check
        :raises ValueError: if `user` is not a contributor
        """
        if not self.is_contributor(user):
            raise ValueError(u'User {0} not in contributors'.format(user))
        return user._id in self.visible_contributor_ids
    def update_visible_ids(self, save=False):
        """Update the order of `visible_contributor_ids`. Updating on making
        a contributor visible is more efficient than recomputing order on
        accessing `visible_contributors`.

        :param bool save: Save changes
        """
        # Re-derive the list in `contributors` order, keeping only ids that
        # are currently marked visible.
        self.visible_contributor_ids = [
            contributor._id
            for contributor in self.contributors
            if contributor._id in self.visible_contributor_ids
        ]
        if save:
            self.save()
    def set_visible(self, user, visible, log=True, auth=None, save=False):
        """Show or hide a contributor in the visible-contributors list,
        optionally logging the change.

        :param User user: Contributor to show/hide
        :param bool visible: Desired visibility
        :param bool log: Record a NodeLog entry for the change
        :param Auth auth: Auth context for the log entry
        :param bool save: Save changes
        :raises ValueError: if `user` is not a contributor
        """
        if not self.is_contributor(user):
            raise ValueError(u'User {0} not in contributors'.format(user))
        if visible and user._id not in self.visible_contributor_ids:
            self.visible_contributor_ids.append(user._id)
            self.update_visible_ids(save=False)
        elif not visible and user._id in self.visible_contributor_ids:
            self.visible_contributor_ids.remove(user._id)
        else:
            # No-op: visibility already matches the request.
            return
        message = (
            NodeLog.MADE_CONTRIBUTOR_VISIBLE
            if visible
            else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
        )
        if log:
            self.add_log(
                message,
                params={
                    'parent': self.parent_id,
                    'node': self._id,
                    'contributors': [user._id],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
    def can_comment(self, auth):
        """Return whether the auth context may comment: with 'public'
        comment level, any logged-in user who can read; otherwise
        contributors only.
        """
        if self.comment_level == 'public':
            return auth.logged_in and (
                self.is_public or
                (auth.user and self.has_permission(auth.user, 'read'))
            )
        return self.is_contributor(auth.user)
    def update(self, fields, auth=None, save=True):
        """Apply whitelisted field updates from a dict, recording old/new
        values in a NodeLog entry.

        :param dict fields: Mapping of field name -> new value; keys not in
            WRITABLE_WHITELIST are silently ignored
        :param Auth auth: Auth context for the log entry
        :param bool save: Save after applying changes
        :raises NodeUpdateError: if this node is a registration, or a value
            cannot be assigned
        :returns: List of saved field names (empty list when save is False)
        """
        if self.is_registration:
            # NOTE(review): NodeUpdateError.__init__ requires `key`, which is
            # omitted here -- confirm this call site.
            raise NodeUpdateError(reason="Registered content cannot be updated")
        values = {}
        for key, value in fields.iteritems():
            if key not in self.WRITABLE_WHITELIST:
                continue
            with warnings.catch_warnings():
                try:
                    # This is in place because historically projects and components
                    # live on different ElasticSearch indexes, and at the time of Node.save
                    # there is no reliable way to check what the old Node.category
                    # value was. When the category changes it is possible to have duplicate/dead
                    # search entries, so always delete the ES doc on category change
                    # TODO: consolidate Node indexes into a single index, refactor search
                    if key == 'category':
                        self.delete_search_entry()
                    ###############
                    values[key] = {
                        'old': getattr(self, key),
                        'new': value,
                    }
                    setattr(self, key, value)
                except AttributeError:
                    raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
                except warnings.Warning:
                    # NOTE(review): warnings only become exceptions when an
                    # 'error' filter is active; this handler may be dead code
                    # -- confirm.
                    raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
        if save:
            updated = self.save()
        else:
            updated = []
        # Re-read the applied values: setattr may have normalized them.
        for key in values:
            values[key]['new'] = getattr(self, key)
        self.add_log(NodeLog.UPDATED_FIELDS,
            params={
                'node': self._id,
                'updated_fields': {
                    key: {
                        'old': values[key]['old'],
                        'new': values[key]['new']
                    }
                    for key in values
                }
            },
            auth=auth)
        return updated
    def save(self, *args, **kwargs):
        """Persist the node, handling first-save bookkeeping (default addons,
        PROJECT_CREATED log, parent linkage) and propagating changes to
        search and Piwik.

        :param bool update_piwik: (kwarg, popped) update Piwik after save
        :param bool suppress_log: (kwarg, popped) skip the creation log
        :returns: List of saved field names (StoredObject::save contract)
        :raises NodeStateError: when creating a second dashboard for a user
        """
        update_piwik = kwargs.pop('update_piwik', True)
        self.adjust_permissions()
        first_save = not self._is_loaded
        if first_save and self.is_dashboard:
            existing_dashboards = self.creator.node__contributed.find(
                Q('is_dashboard', 'eq', True)
            )
            if existing_dashboards.count() > 0:
                raise NodeStateError("Only one dashboard allowed per user.")
        is_original = not self.is_registration and not self.is_fork
        if 'suppress_log' in kwargs.keys():
            suppress_log = kwargs['suppress_log']
            del kwargs['suppress_log']
        else:
            suppress_log = False
        saved_fields = super(Node, self).save(*args, **kwargs)
        if first_save and is_original and not suppress_log:
            # TODO: This logic also exists in self.use_as_template()
            for addon in settings.ADDONS_AVAILABLE:
                if 'node' in addon.added_default:
                    self.add_addon(addon.short_name, auth=None, log=False)
            # Define log fields for non-component project
            log_action = NodeLog.PROJECT_CREATED
            log_params = {
                'node': self._primary_key,
            }
            if getattr(self, 'parent', None):
                # Append log to parent
                self.parent.nodes.append(self)
                self.parent.save()
                log_params.update({'parent_node': self.parent._primary_key})
            # Add log with appropriate fields
            self.add_log(
                log_action,
                params=log_params,
                auth=Auth(user=self.creator),
                log_date=self.date_created,
                save=True,
            )
        # Only update Solr if at least one stored field has changed, and if
        # public or privacy setting has changed
        need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
        if not self.is_public:
            if first_save or 'is_public' not in saved_fields:
                need_update = False
        # NOTE(review): `archiving` is not defined in this class body --
        # presumably set by the archiver subsystem; confirm.
        if self.is_folder or self.archiving:
            need_update = False
        if need_update:
            self.update_search()
        # This method checks what has changed.
        if settings.PIWIK_HOST and update_piwik:
            piwik_tasks.update_node(self._id, saved_fields)
        # Return expected value for StoredObject::save
        return saved_fields
######################################
# Methods that return a new instance #
######################################
    def use_as_template(self, auth, changes=None, top_level=True):
        """Create a new project, using an existing project as a template.

        :param auth: The user to be assigned as creator
        :param changes: A dictionary of changes, keyed by node id, which
                        override the attributes of the template project or its
                        children.
        :param bool top_level: True only for the root of the template tree;
                        controls whether the "Templated from" title prefix
                        is applied.
        :return: The `Node` instance created.
        """
        changes = changes or dict()
        # build the dict of attributes to change for the new node
        try:
            attributes = changes[self._id]
            # TODO: explicitly define attributes which may be changed.
        except (AttributeError, KeyError):
            attributes = dict()
        new = self.clone()
        # clear permissions, which are not cleared by the clone method
        new.permissions = {}
        new.visible_contributor_ids = []
        # Clear quasi-foreign fields
        new.wiki_pages_current = {}
        new.wiki_pages_versions = {}
        new.wiki_private_uuids = {}
        new.file_guid_to_share_uuids = {}
        # set attributes which may be overridden by `changes`
        new.is_public = False
        new.description = None
        # apply `changes`
        for attr, val in attributes.iteritems():
            setattr(new, attr, val)
        # set attributes which may NOT be overridden by `changes`
        new.creator = auth.user
        new.add_contributor(contributor=auth.user, log=False, save=False)
        new.template_node = self
        new.is_fork = False
        new.is_registration = False
        new.piwik_site_id = None
        # If that title hasn't been changed, apply the default prefix (once)
        if (new.title == self.title
                and top_level
                and language.TEMPLATED_FROM_PREFIX not in new.title):
            new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
        # Slight hack - date_created is a read-only field.
        new._fields['date_created'].__set__(
            new,
            datetime.datetime.utcnow(),
            safe=True
        )
        # Suppress the PROJECT_CREATED log; CREATED_FROM is added below.
        new.save(suppress_log=True)
        # Log the creation
        new.add_log(
            NodeLog.CREATED_FROM,
            params={
                'node': new._primary_key,
                'template_node': {
                    'id': self._primary_key,
                    'url': self.url,
                },
            },
            auth=auth,
            log_date=new.date_created,
            save=False,
        )
        # add mandatory addons
        # TODO: This logic also exists in self.save()
        for addon in settings.ADDONS_AVAILABLE:
            if 'node' in addon.added_default:
                new.add_addon(addon.short_name, auth=None, log=False)
        # deal with the children of the node, if any
        new.nodes = [
            x.use_as_template(auth, changes, top_level=False)
            for x in self.nodes
            if x.can_view(auth)
        ]
        new.save()
        return new
############
# Pointers #
############
    def add_pointer(self, node, auth, save=True):
        """Add a pointer to a node.

        :param Node node: Node to add
        :param Auth auth: Consolidated authorization
        :param bool save: Save changes
        :raises ValueError: if the node is already linked, is a folder that
            is already pointed at, or is the dashboard
        :return: Created pointer
        """
        # Fail if node already in nodes / pointers. Note: cast node and node
        # to primary keys to test for conflicts with both nodes and pointers
        # contained in `self.nodes`.
        if node._id in self.node_ids:
            raise ValueError(
                'Pointer to node {0} already in list'.format(node._id)
            )
        # If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
        # Also, no pointers to the dashboard project, which could cause loops as well.
        already_pointed = node.pointed
        if node.is_folder and len(already_pointed) > 0:
            raise ValueError(
                'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
            )
        if node.is_dashboard:
            raise ValueError(
                'Pointer to dashboard ({0}) not allowed.'.format(node._id)
            )
        # Append pointer
        pointer = Pointer(node=node)
        pointer.save()
        self.nodes.append(pointer)
        # Add log
        self.add_log(
            action=NodeLog.POINTER_CREATED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'pointer': {
                    'id': pointer.node._id,
                    'url': pointer.node.url,
                    'title': pointer.node.title,
                    'category': pointer.node.category,
                },
            },
            auth=auth,
            save=False,
        )
        # Optionally save changes
        if save:
            self.save()
        return pointer
def rm_pointer(self, pointer, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
if pointer not in self.nodes:
raise ValueError
# Remove `Pointer` object; will also remove self from `nodes` list of
# parent node
Pointer.remove_one(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
    @property
    def depth(self):
        """Number of ancestors above this node (0 for a root)."""
        return len(self.parents)
    def next_descendants(self, auth, condition=lambda auth, node: True):
        """
        Recursively find the first set of descedants under a given node that meet a given condition

        returns a list of [(node, [children]), ...]

        :param Auth auth: Auth context passed through to `condition`
        :param callable condition: Predicate of (auth, node); recursion stops
            at the first node on each branch that satisfies it
        """
        ret = []
        for node in self.nodes:
            if condition(auth, node):
                # base case
                ret.append((node, []))
            else:
                ret.append((node, node.next_descendants(auth, condition)))
        ret = [item for item in ret if item[1] or condition(auth, item[0])]  # prune empty branches
        return ret
    def get_descendants_recursive(self, include=lambda n: True):
        """Yield all descendants depth-first that satisfy `include`.

        Pointers may be yielded, but only primary children are descended
        into.
        """
        for node in self.nodes:
            if include(node):
                yield node
            if node.primary:
                for descendant in node.get_descendants_recursive(include):
                    if include(descendant):
                        yield descendant
    def get_aggregate_logs_queryset(self, auth):
        """Return a NodeLog queryset (newest first) covering this node and
        every descendant the auth context can view.
        """
        ids = [self._id] + [n._id
            for n in self.get_descendants_recursive()
            if n.can_view(auth)]
        # Query against the backref index linking logs to nodes.
        query = Q('__backrefs.logged.node.logs', 'in', ids)
        return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
    @property
    def pointed(self):
        """Pointers targeting this node (backref); empty list when none."""
        return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
    def resolve(self):
        """A primary node resolves to itself (pointers resolve to targets)."""
        return self
    def fork_pointer(self, pointer, auth, save=True):
        """Replace a pointer with a fork. If the pointer points to a project,
        fork the project and replace the pointer with a new pointer pointing
        to the fork. If the pointer points to a component, fork the component
        and add it to the current node.

        :param Pointer pointer:
        :param Auth auth:
        :param bool save:
        :raises ValueError: if the pointer is not in `nodes` or the fork
            fails
        :return: Forked node
        """
        # Fail if pointer not contained in `nodes`
        try:
            index = self.nodes.index(pointer)
        except ValueError:
            raise ValueError('Pointer {0} not in list'.format(pointer._id))
        # Get pointed node
        node = pointer.node
        # Fork into current node and replace pointer with forked component
        forked = node.fork_node(auth)
        if forked is None:
            raise ValueError('Could not fork node')
        self.nodes[index] = forked
        # Add log
        self.add_log(
            NodeLog.POINTER_FORKED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'pointer': {
                    'id': pointer.node._id,
                    'url': pointer.node.url,
                    'title': pointer.node.title,
                    'category': pointer.node.category,
                },
            },
            auth=auth,
            save=False,
        )
        # Optionally save changes
        if save:
            self.save()
        # Garbage-collect pointer. Note: Must save current node before
        # removing pointer, else remove will fail when trying to remove
        # backref from self to pointer.
        Pointer.remove_one(pointer)
        # Return forked content
        return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
return list(reversed(self.logs)[:n])
    @property
    def date_modified(self):
        '''The most recent datetime when this node was modified, based on
        the logs.
        '''
        try:
            return self.logs[-1].date
        except IndexError:
            # No logs yet: fall back to the creation timestamp.
            return self.date_created
    def set_title(self, title, auth, save=False):
        """Set the title of this Node and log it.

        :param str title: The new title.
        :param auth: All the auth information including user, API key.
        :param bool save: Save self after updating.
        :raises ValidationValueError: if the title is blank or too long
        """
        #Called so validation does not have to wait until save.
        validate_title(title)
        original_title = self.title
        self.title = title
        self.add_log(
            action=NodeLog.EDITED_TITLE,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'title_new': self.title,
                'title_original': original_title,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
        return None
    def set_description(self, description, auth, save=False):
        """Set the description and log the event.

        :param str description: The new description
        :param auth: All the auth information including user, API key.
        :param bool save: Save self after updating.
        """
        original = self.description
        self.description = description
        self.add_log(
            action=NodeLog.EDITED_DESCRIPTION,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'description_new': self.description,
                'description_original': original
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
        return None
    def update_search(self):
        """Push this node's current state to the search backend; failures
        are logged, never raised.
        """
        # Local import avoids a circular dependency with the search package.
        from website import search
        try:
            search.search.update_node(self)
        except search.exceptions.SearchUnavailableError as e:
            logger.exception(e)
            log_exception()
    def delete_search_entry(self):
        """Remove this node's document from the search backend; failures
        are logged, never raised.
        """
        # Local import avoids a circular dependency with the search package.
        from website import search
        try:
            search.search.delete_node(self)
        except search.exceptions.SearchUnavailableError as e:
            logger.exception(e)
            log_exception()
    def remove_node(self, auth, date=None):
        """Marks a node as deleted.

        TODO: Call a hook on addons
        Adds a log to the parent node if applicable

        :param auth: an instance of :class:`Auth`.
        :param date: Date node was removed
        :type date: `datetime.datetime` or `None`
        :raises NodeStateError: for dashboards or nodes with live children
        :raises PermissionsError: when `auth` may not edit this node
        """
        # TODO: rename "date" param - it's shadowing a global
        if self.is_dashboard:
            raise NodeStateError("Dashboards may not be deleted.")
        if not self.can_edit(auth):
            raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
        #if this is a folder, remove all the folders that this is pointing at.
        if self.is_folder:
            for pointed in self.nodes_pointer:
                if pointed.node.is_folder:
                    pointed.node.remove_node(auth=auth)
        if [x for x in self.nodes_primary if not x.is_deleted]:
            raise NodeStateError("Any child components must be deleted prior to deleting this project.")
        # After delete callback
        for addon in self.get_addons():
            message = addon.after_delete(self, auth.user)
            if message:
                status.push_status_message(message)
        log_date = date or datetime.datetime.utcnow()
        # Add log to parent
        if self.node__parent:
            self.node__parent[0].add_log(
                NodeLog.NODE_REMOVED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        else:
            self.add_log(
                NodeLog.PROJECT_DELETED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        self.is_deleted = True
        # NOTE(review): `deleted_date` keeps the raw `date` argument and
        # stays None when no date was given, while the log uses `log_date`
        # -- confirm this asymmetry is intended.
        self.deleted_date = date
        self.save()
        auth_signals.node_deleted.send(self)
        return True
    def fork_node(self, auth, title='Fork of '):
        """Recursively fork a node.

        :param Auth auth: Consolidated authorization
        :param str title: Optional text to prepend to forked title
        :raises PermissionsError: when the user may not read this node
        :raises NodeStateError: when the node is deleted
        :return: Forked node
        """
        user = auth.user
        # Non-contributors can't fork private nodes
        if not (self.is_public or self.has_permission(user, 'read')):
            raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
        when = datetime.datetime.utcnow()
        # Re-load to fork from the persisted state, not in-memory changes.
        original = self.load(self._primary_key)
        if original.is_deleted:
            raise NodeStateError('Cannot fork deleted node.')
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        forked = original.clone()
        forked.logs = self.logs
        forked.tags = self.tags
        # Recursively fork child nodes
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                forked_node = None
                try:  # Catch the potential PermissionsError above
                    forked_node = node_contained.fork_node(auth=auth, title='')
                except PermissionsError:
                    pass  # If this exception is thrown omit the node from the result set
                if forked_node is not None:
                    forked.nodes.append(forked_node)
        forked.title = title + forked.title
        forked.is_fork = True
        forked.is_registration = False
        forked.forked_date = when
        forked.forked_from = original
        forked.creator = user
        forked.piwik_site_id = None
        # Forks default to private status
        forked.is_public = False
        # Clear permissions before adding users
        forked.permissions = {}
        forked.visible_contributor_ids = []
        forked.add_contributor(contributor=user, log=False, save=False)
        forked.add_log(
            action=NodeLog.NODE_FORKED,
            params={
                'parent_node': original.parent_id,
                'node': original._primary_key,
                'registration': forked._primary_key,
            },
            auth=auth,
            log_date=when,
            save=False,
        )
        forked.save()
        # After fork callback
        for addon in original.get_addons():
            _, message = addon.after_fork(original, forked, user)
            if message:
                status.push_status_message(message)
        return forked
    def register_node(self, schema, auth, template, data, parent=None):
        """Make a frozen copy of a node.

        :param schema: Schema object
        :param auth: All the auth information including user, API key.
        :param template: Template name
        :param data: Form data
        :param parent Node: parent registration of registration to be created
        :raises PermissionsError: when the user may not register this node
        :raises NodeStateError: for folders and deleted nodes
        """
        # NOTE: Admins can register child nodes even if they don't have write access them
        if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
            raise PermissionsError(
                'User {} does not have permission '
                'to register this node'.format(auth.user._id)
            )
        if self.is_folder:
            raise NodeStateError("Folders may not be registered")
        template = urllib.unquote_plus(template)
        template = to_mongo(template)
        when = datetime.datetime.utcnow()
        original = self.load(self._primary_key)
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        if original.is_deleted:
            raise NodeStateError('Cannot register deleted node.')
        registered = original.clone()
        registered.is_registration = True
        registered.registered_date = when
        registered.registered_user = auth.user
        registered.registered_schema = schema
        registered.registered_from = original
        if not registered.registered_meta:
            registered.registered_meta = {}
        registered.registered_meta[template] = data
        registered.contributors = self.contributors
        registered.forked_from = self.forked_from
        registered.creator = self.creator
        registered.logs = self.logs
        registered.tags = self.tags
        registered.piwik_site_id = None
        registered.save()
        if parent:
            registered.parent_node = parent
        # After register callback
        for addon in original.get_addons():
            _, message = addon.after_register(original, registered, auth.user)
            if message:
                status.push_status_message(message)
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                child_registration = node_contained.register_node(
                    schema, auth, template, data, parent=registered
                )
                # NOTE(review): only non-primary children (pointer
                # registrations) are appended here; primary children are
                # presumably linked via `parent` -- confirm.
                if child_registration and not child_registration.primary:
                    registered.nodes.append(child_registration)
        registered.save()
        if settings.ENABLE_ARCHIVER:
            project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
        return registered
    def remove_tag(self, tag, auth, save=True):
        """Remove a tag from this node and record a TAG_REMOVED log entry.

        :param str tag: Tag key to remove from ``self.tags``
        :param Auth auth: Consolidated authorization for the acting user
        :param bool save: Persist the node after removal
        """
        if tag in self.tags:
            self.tags.remove(tag)
            # Log with save=False; the single save below persists both changes
            self.add_log(
                action=NodeLog.TAG_REMOVED,
                params={
                    'parent_node': self.parent_id,
                    'node': self._primary_key,
                    'tag': tag,
                },
                auth=auth,
                save=False,
            )
            if save:
                self.save()
    def add_tag(self, tag, auth, save=True):
        """Add a tag to this node (creating the Tag record if needed) and log it.

        :param str tag: Tag key to add
        :param Auth auth: Consolidated authorization for the acting user
        :param bool save: Persist the node after adding
        """
        if tag not in self.tags:
            new_tag = Tag.load(tag)
            # Tags are shared records keyed by their text; create on first use
            if not new_tag:
                new_tag = Tag(_id=tag)
            new_tag.save()
            self.tags.append(new_tag)
            self.add_log(
                action=NodeLog.TAG_ADDED,
                params={
                    'parent_node': self.parent_id,
                    'node': self._primary_key,
                    'tag': tag,
                },
                auth=auth,
                save=False,
            )
            if save:
                self.save()
    def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
        """Create a NodeLog entry, attach it to this node, and return it.

        :param str action: One of the NodeLog action constants
        :param dict params: Log payload; 'node' falls back to params['project']
        :param Auth auth: Authorization (supplies user and API key); may be None
        :param foreign_user: External (non-OSF) actor name, if any
        :param log_date: Optional override for the log timestamp
        :param bool save: Persist this node after appending the log
        :return: The saved NodeLog
        """
        user = auth.user if auth else None
        api_key = auth.api_key if auth else None
        params['node'] = params.get('node') or params.get('project')
        log = NodeLog(
            action=action,
            user=user,
            foreign_user=foreign_user,
            api_key=api_key,
            params=params,
        )
        if log_date:
            log.date = log_date
        log.save()
        self.logs.append(log)
        if save:
            self.save()
        if user:
            # Keep per-user activity counters in sync with the new entry
            increment_user_activity_counters(user._primary_key, action, log.date)
        return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
    def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
        """Reverse a web (HTML) view URL for this node by view name."""
        return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
    def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
        """Reverse a v1 API view URL for this node by view name."""
        return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
    @property
    def absolute_url(self):
        """Fully-qualified web URL of this node, or None when no URL exists."""
        if not self.url:
            logger.error('Node {0} has a parent that is not a project'.format(self._id))
            return None
        return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
    @property
    def api_v2_url(self):
        """Relative v2 API URL for this node (Django reverse)."""
        return reverse('nodes:node-detail', kwargs={'node_id': self._id})
    @property
    def absolute_api_v2_url(self):
        """Absolute v2 API URL for this node."""
        return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
    # used by django and DRF
    def get_absolute_url(self):
        """Django/DRF convention: the canonical URL is the absolute v2 API URL."""
        return self.absolute_api_v2_url
    @property
    def api_url(self):
        """Relative v1 API URL of this node, or None when no URL exists."""
        if not self.url:
            logger.error('Node {0} has a parent that is not a project'.format(self._id))
            return None
        return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
    @property
    def csl(self):  # formats node information into CSL format for citation parsing
        """a dict in CSL-JSON schema

        For details on this schema, see:
            https://github.com/citation-style-language/schema#csl-json-schema
        """
        csl = {
            'id': self._id,
            'title': html_parser.unescape(self.title),
            'author': [
                contributor.csl_name  # method in auth/model.py which parses the names of authors
                for contributor in self.visible_contributors
            ],
            'publisher': 'Open Science Framework',
            'type': 'webpage',
            'URL': self.display_absolute_url,
        }
        doi = self.get_identifier_value('doi')
        if doi:
            csl['DOI'] = doi
        if self.logs:
            # Date of the most recent log entry stands in for the issued date
            csl['issued'] = datetime_to_csl(self.logs[-1].date)
        return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
    @property
    def templated_list(self):
        """Nodes created using this node as a template, excluding deleted ones."""
        return [
            x
            for x in self.node__template_node
            if not x.is_deleted
        ]
    @property
    def parent_node(self):
        """The parent node, if it exists, otherwise ``None``. Note: this
        property is named `parent_node` rather than `parent` to avoid a
        conflict with the `parent` back-reference created by the `nodes`
        field on this schema.
        """
        try:
            if not self.node__parent[0].is_deleted:
                return self.node__parent[0]
        except IndexError:
            # No parent back-reference: this is a top-level node
            pass
        return None
    @parent_node.setter
    def parent_node(self, parent):
        """Attach this node as a child of *parent* (saves the parent)."""
        parent.nodes.append(self)
        parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
    @property
    def archiving(self):
        """Whether an archive job for this node is still in progress."""
        job = self.archive_job
        return job and not job.done and not job.archive_tree_finished()
    @property
    def archive_job(self):
        """The active archive job for this node (via back-reference), or None."""
        return self.archivejob__active[0] if self.archivejob__active else None
    @property
    def registrations(self):
        """Registrations of this node that are not currently archiving."""
        return self.node__registrations.find(Q('archiving', 'eq', False))
    @property
    def watch_url(self):
        """v1 API endpoint for watching/unwatching this node."""
        return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
    def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
        """Add an add-on to the node. Do nothing if the addon is already
        enabled.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool log: Add a log after adding the add-on
        :return: A boolean, whether the addon was added
        """
        ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
                                        *args, **kwargs)
        if ret and log:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()  # TODO: here, or outside the conditional? @mambocab
        return ret
    def delete_addon(self, addon_name, auth, _force=False):
        """Delete an add-on from the node.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool _force: For migration testing ONLY. Do not set to True
            in the application, or else projects will be allowed to delete
            mandatory add-ons!
        :return bool: Add-on was deleted
        """
        ret = super(Node, self).delete_addon(addon_name, auth, _force)
        if ret:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_REMOVED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()
            # TODO: save here or outside the conditional? @mambocab
        return ret
    def callback(self, callback, recursive=False, *args, **kwargs):
        """Invoke callbacks of attached add-ons and collect messages.

        Note: the `callback` parameter intentionally shadows the method name;
        it holds the *name* of the add-on hook to call.

        :param str callback: Name of callback method to invoke
        :param bool recursive: Apply callback recursively over nodes
        :return list: List of callback messages
        """
        messages = []
        for addon in self.get_addons():
            method = getattr(addon, callback)
            message = method(self, *args, **kwargs)
            if message:
                messages.append(message)
        if recursive:
            # Visit non-deleted children, accumulating their messages too
            for child in self.nodes:
                if not child.is_deleted:
                    messages.extend(
                        child.callback(
                            callback, recursive, *args, **kwargs
                        )
                    )
        return messages
    def replace_contributor(self, old, new):
        """Swap contributor *old* for *new* in place, migrating permissions,
        visibility, and any unclaimed record for this node.

        :return bool: True if *old* was found and replaced, else False
        """
        for i, contrib in enumerate(self.contributors):
            if contrib._primary_key == old._primary_key:
                self.contributors[i] = new
                # Remove unclaimed record for the project
                if self._primary_key in old.unclaimed_records:
                    del old.unclaimed_records[self._primary_key]
                    old.save()
                for permission in self.get_permissions(old):
                    self.add_permission(new, permission)
                self.permissions.pop(old._id)
                if old._id in self.visible_contributor_ids:
                    self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
                return True
        return False
    def remove_contributor(self, contributor, auth, log=True):
        """Remove a contributor from this node.

        :param contributor: User object, the contributor to be removed
        :param auth: All the auth information including user, API key.
        :param bool log: Record a CONTRIB_REMOVED log entry
        :return bool: False (without saving) if removal would leave the node
            with no registered admin; True otherwise
        """
        # remove unclaimed record if necessary
        if self._primary_key in contributor.unclaimed_records:
            del contributor.unclaimed_records[self._primary_key]
        self.contributors.remove(contributor._id)
        self.clear_permission(contributor)
        if contributor._id in self.visible_contributor_ids:
            self.visible_contributor_ids.remove(contributor._id)
        # Node must have at least one registered admin user
        # TODO: Move to validator or helper
        admins = [
            user for user in self.contributors
            if self.has_permission(user, 'admin')
            and user.is_registered
        ]
        if not admins:
            # NOTE(review): in-memory mutations above are not persisted on this
            # path (no save); caller must not save the node after a False return
            return False
        # Clear permissions for removed user
        self.permissions.pop(contributor._id, None)
        # After remove callback
        for addon in self.get_addons():
            message = addon.after_remove_contributor(self, contributor, auth)
            if message:
                status.push_status_message(message)
        if log:
            self.add_log(
                action=NodeLog.CONTRIB_REMOVED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributor': contributor._id,
                },
                auth=auth,
                save=False,
            )
        self.save()
        #send signal to remove this user from project subscriptions
        auth_signals.contributor_removed.send(contributor, node=self)
        return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
if False in results:
return False
return True
    def manage_contributors(self, user_dicts, auth, save=False):
        """Reorder and remove contributors.

        :param list user_dicts: Ordered list of contributors represented as
            dictionaries of the form:
            {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
        :param Auth auth: Consolidated authentication information
        :param bool save: Save changes
        :raises: ValueError if any users in `users` not in contributors or if
            no admin contributors remaining
        """
        with TokuTransaction():
            users = []
            user_ids = []
            permissions_changed = {}
            to_retain = []
            to_remove = []
            for user_dict in user_dicts:
                user = User.load(user_dict['id'])
                if user is None:
                    raise ValueError('User not found')
                if user not in self.contributors:
                    raise ValueError(
                        'User {0} not in contributors'.format(user.fullname)
                    )
                # Only touch permissions that actually changed
                permissions = expand_permissions(user_dict['permission'])
                if set(permissions) != set(self.get_permissions(user)):
                    self.set_permissions(user, permissions, save=False)
                    permissions_changed[user._id] = permissions
                self.set_visible(user, user_dict['visible'], auth=auth)
                users.append(user)
                user_ids.append(user_dict['id'])
            # Split current contributors into kept vs. dropped
            for user in self.contributors:
                if user._id in user_ids:
                    to_retain.append(user)
                else:
                    to_remove.append(user)
            # TODO: Move to validator or helper @jmcarp
            admins = [
                user for user in users
                if self.has_permission(user, 'admin')
                and user.is_registered
            ]
            if users is None or not admins:
                raise ValueError(
                    'Must have at least one registered admin contributor'
                )
            # A differing order means the list was reordered, not just filtered
            if to_retain != users:
                self.add_log(
                    action=NodeLog.CONTRIB_REORDERED,
                    params={
                        'project': self.parent_id,
                        'node': self._id,
                        'contributors': [
                            user._id
                            for user in users
                        ],
                    },
                    auth=auth,
                    save=False,
                )
            if to_remove:
                self.remove_contributors(to_remove, auth=auth, save=False)
            self.contributors = users
            if permissions_changed:
                self.add_log(
                    action=NodeLog.PERMISSIONS_UPDATED,
                    params={
                        'project': self.parent_id,
                        'node': self._id,
                        'contributors': permissions_changed,
                    },
                    auth=auth,
                    save=False,
                )
            # Update list of visible IDs
            self.update_visible_ids()
            if save:
                self.save()
        with TokuTransaction():
            # `and` binds tighter than `or`: fire when anyone was removed, or
            # when some contributor was reduced to read-only
            if to_remove or permissions_changed and ['read'] in permissions_changed.values():
                project_signals.write_permissions_revoked.send(self)
    def add_contributor(self, contributor, permissions=None, visible=True,
                        auth=None, log=True, save=False):
        """Add a contributor to the project.

        :param User contributor: The contributor to be added
        :param list permissions: Permissions to grant to the contributor
        :param bool visible: Contributor is visible in project dashboard
        :param Auth auth: All the auth information including user, API key
        :param bool log: Add log to self
        :param bool save: Save after adding contributor
        :returns: Whether contributor was added
        """
        # Cap on the adding user's "recently added" collaborator list
        MAX_RECENT_LENGTH = 15
        # If user is merged into another account, use master account
        contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
        if contrib_to_add not in self.contributors:
            self.contributors.append(contrib_to_add)
            if visible:
                self.set_visible(contrib_to_add, visible=True, log=False)
            # Add default contributor permissions
            permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
            for permission in permissions:
                self.add_permission(contrib_to_add, permission, save=False)
            # Add contributor to recently added list for user
            if auth is not None:
                user = auth.user
                if contrib_to_add in user.recently_added:
                    user.recently_added.remove(contrib_to_add)
                user.recently_added.insert(0, contrib_to_add)
                while len(user.recently_added) > MAX_RECENT_LENGTH:
                    user.recently_added.pop()
            if log:
                self.add_log(
                    action=NodeLog.CONTRIB_ADDED,
                    params={
                        'project': self.parent_id,
                        'node': self._primary_key,
                        'contributors': [contrib_to_add._primary_key],
                    },
                    auth=auth,
                    save=False,
                )
            if save:
                self.save()
            project_signals.contributor_added.send(self, contributor=contributor, auth=auth)
            return True
        #Permissions must be overridden if changed when contributor is added to parent he/she is already on a child of.
        elif contrib_to_add in self.contributors and permissions is not None:
            self.set_permissions(contrib_to_add, permissions)
            if save:
                self.save()
            return False
        else:
            return False
    def add_contributors(self, contributors, auth=None, log=True, save=False):
        """Add multiple contributors

        :param contributors: A list of dicts, each with keys 'user' (a User
            object), 'permissions', and 'visible'
        :param auth: All the auth information including user, API key.
        :param log: Add a single batched log entry to self
        :param save: Save after adding contributor
        """
        for contrib in contributors:
            self.add_contributor(
                contributor=contrib['user'], permissions=contrib['permissions'],
                visible=contrib['visible'], auth=auth, log=False, save=False,
            )
        if log and contributors:
            self.add_log(
                action=NodeLog.CONTRIB_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributors': [
                        contrib['user']._id
                        for contrib in contributors
                    ],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
    def add_unregistered_contributor(self, fullname, email, auth,
                                     permissions=None, save=False):
        """Add a non-registered contributor to the project.

        :param str fullname: The full name of the person.
        :param str email: The email address of the person.
        :param Auth auth: Auth object for the user adding the contributor.
        :param permissions: Permissions to grant the new contributor
        :param bool save: Accepted for signature symmetry, but note the node
            is saved unconditionally below
        :returns: The added contributor
        :raises: DuplicateEmailError if user with given email is already in the database.
        """
        # Create a new user record
        contributor = User.create_unregistered(fullname=fullname, email=email)
        contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                         given_name=fullname, email=email)
        try:
            contributor.save()
        except ValidationValueError:  # User with same email already exists
            contributor = get_user(email=email)
            # Unregistered users may have multiple unclaimed records, so
            # only raise error if user is registered.
            if contributor.is_registered or self.is_contributor(contributor):
                raise
            contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                             given_name=fullname, email=email)
            contributor.save()
        self.add_contributor(
            contributor, permissions=permissions, auth=auth,
            log=True, save=False,
        )
        self.save()
        return contributor
    def set_privacy(self, permissions, auth=None, log=True, save=True):
        """Set the permissions for this node.

        :param permissions: A string, either 'public' or 'private'
        :param auth: All the auth information including user, API key.
        :param bool log: Whether to add a NodeLog for the privacy change.
        :param bool save: Persist the change immediately.
        :return: False when no state change was needed; True otherwise.
        """
        if permissions == 'public' and not self.is_public:
            if self.is_registration:
                if self.pending_embargo:
                    raise NodeStateError("A registration with an unapproved embargo cannot be made public")
                # Making an actively embargoed registration public ends the embargo
                if self.embargo_end_date and not self.pending_embargo:
                    self.embargo.state = Embargo.CANCELLED
                    self.embargo.save()
            self.is_public = True
        elif permissions == 'private' and self.is_public:
            if self.is_registration and not self.pending_embargo:
                raise NodeStateError("Public registrations must be retracted, not made private.")
            else:
                self.is_public = False
        else:
            return False
        # After set permissions callback
        for addon in self.get_addons():
            message = addon.after_set_privacy(self, permissions)
            if message:
                status.push_status_message(message)
        if log:
            action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
            self.add_log(
                action=action,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
        return True
    # TODO: Move to wiki add-on
    def get_wiki_page(self, name=None, version=None, id=None):
        """Look up a NodeWikiPage by name (plus optional version) or by id.

        :param name: Wiki page name; when truthy, `id` is resolved from this
            node's wiki key maps and any passed-in `id` is overridden
        :param version: int or digit-string version number, 'previous',
            'current', or None (treated as 'current')
        :param id: Direct NodeWikiPage primary key, used when `name` is falsy
        :return: NodeWikiPage or None when the page/version is not found
        """
        from website.addons.wiki.model import NodeWikiPage
        if name:
            name = (name or '').strip()
            key = to_mongo_key(name)
            try:
                if version and (isinstance(version, int) or version.isdigit()):
                    id = self.wiki_pages_versions[key][int(version) - 1]
                elif version == 'previous':
                    id = self.wiki_pages_versions[key][-2]
                elif version == 'current' or version is None:
                    id = self.wiki_pages_current[key]
                else:
                    return None
            except (KeyError, IndexError):
                return None
        return NodeWikiPage.load(id)
    # TODO: Move to wiki add-on
    def update_node_wiki(self, name, content, auth):
        """Update the node's wiki page with new content.

        :param name: A string, the page's name, e.g. ``"home"``.
        :param content: A string, the posted content.
        :param auth: All the auth information including user, API key.
        """
        from website.addons.wiki.model import NodeWikiPage
        name = (name or '').strip()
        key = to_mongo_key(name)
        if key not in self.wiki_pages_current:
            # New page, or a page that once existed and was deleted:
            # continue its version numbering if prior versions exist
            if key in self.wiki_pages_versions:
                version = len(self.wiki_pages_versions[key]) + 1
            else:
                version = 1
        else:
            # Demote the current version before creating its successor
            current = NodeWikiPage.load(self.wiki_pages_current[key])
            current.is_current = False
            version = current.version + 1
            current.save()
        new_page = NodeWikiPage(
            page_name=name,
            version=version,
            user=auth.user,
            is_current=True,
            node=self,
            content=content
        )
        new_page.save()
        # check if the wiki page already exists in versions (existed once and is now deleted)
        if key not in self.wiki_pages_versions:
            self.wiki_pages_versions[key] = []
        self.wiki_pages_versions[key].append(new_page._primary_key)
        self.wiki_pages_current[key] = new_page._primary_key
        self.add_log(
            action=NodeLog.WIKI_UPDATED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': new_page.page_name,
                'page_id': new_page._primary_key,
                'version': new_page.version,
            },
            auth=auth,
            log_date=new_page.date,
            save=False,
        )
        self.save()
    # TODO: Move to wiki add-on
    def rename_node_wiki(self, name, new_name, auth):
        """Rename the node's wiki page with new name.

        :param name: A string, the page's name, e.g. ``"My Page"``.
        :param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
        :param auth: All the auth information including user, API key.
        :raises PageCannotRenameError: when renaming the home page
        :raises PageNotFoundError: when no page exists under `name`
        :raises PageConflictError: when `new_name` collides with another page
        """
        # TODO: Fix circular imports
        from website.addons.wiki.exceptions import (
            PageCannotRenameError,
            PageConflictError,
            PageNotFoundError,
        )
        name = (name or '').strip()
        key = to_mongo_key(name)
        new_name = (new_name or '').strip()
        new_key = to_mongo_key(new_name)
        page = self.get_wiki_page(name)
        if key == 'home':
            raise PageCannotRenameError('Cannot rename wiki home page')
        if not page:
            raise PageNotFoundError('Wiki page not found')
        if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
            raise PageConflictError(
                'Page already exists with name {0}'.format(
                    new_name,
                )
            )
        # rename the page first in case we hit a validation exception.
        old_name = page.page_name
        page.rename(new_name)
        # TODO: merge historical records like update (prevents log breaks)
        # transfer the old page versions/current keys to the new name.
        if key != new_key:
            self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
            del self.wiki_pages_versions[key]
            self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
            del self.wiki_pages_current[key]
            if key in self.wiki_private_uuids:
                self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
                del self.wiki_private_uuids[key]
        self.add_log(
            action=NodeLog.WIKI_RENAMED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': page.page_name,
                'page_id': page._primary_key,
                'old_page': old_name,
                'version': page.version,
            },
            auth=auth,
            save=False,
        )
        self.save()
    def delete_node_wiki(self, name, auth):
        """Remove a wiki page's current version from this node and log it.

        :param name: A string, the page's name.
        :param auth: All the auth information including user, API key.
        """
        name = (name or '').strip()
        key = to_mongo_key(name)
        # NOTE(review): passes the mongo key (not the raw name) back into
        # get_wiki_page, which applies to_mongo_key again -- relies on
        # to_mongo_key being idempotent; confirm.
        page = self.get_wiki_page(key)
        # Version history in wiki_pages_versions is intentionally retained
        del self.wiki_pages_current[key]
        self.add_log(
            action=NodeLog.WIKI_DELETED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': page.page_name,
                'page_id': page._primary_key,
            },
            auth=auth,
            save=False,
        )
        self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
    # TODO: Deprecate this; it duplicates much of what serialize_project already
    # does
    def serialize(self, auth=None):
        """Dictionary representation of node that is nested within a NodeLog's
        representation.

        :param auth: Used only to compute the breadcrumb `path`
        """
        # TODO: incomplete implementation
        return {
            'id': str(self._primary_key),
            'category': self.category_display,
            'node_type': self.project_or_component,
            'url': self.url,
            # TODO: Titles shouldn't contain escaped HTML in the first place
            'title': html_parser.unescape(self.title),
            'path': self.path_above(auth),
            'api_url': self.api_url,
            'is_public': self.is_public,
            'is_registration': self.is_registration,
        }
    def _initiate_retraction(self, user, justification=None, save=False):
        """Initiates the retraction process for a registration

        :param user: User who initiated the retraction
        :param justification: Justification, if given, for retraction
        :param bool save: Persist the new Retraction record
        :return: The Retraction instance (unsaved unless ``save``)
        """
        retraction = Retraction()
        retraction.initiated_by = user
        if justification:
            retraction.justification = justification
        retraction.state = Retraction.PENDING
        # Only active admin contributors participate in approval
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        approval_state = {}
        # Create approve/disapprove tokens
        for admin in admins:
            approval_state[admin._id] = {
                'approval_token': security.random_string(30),
                'disapproval_token': security.random_string(30),
                'has_approved': False
            }
        retraction.approval_state = approval_state
        # Retraction record needs to be saved to ensure the forward reference Node->Retraction
        if save:
            retraction.save()
        return retraction
    def retract_registration(self, user, justification=None, save=True):
        """Retract public registration. Instantiate new Retraction object
        and associate it with the respective registration.

        :param user: User initiating the retraction
        :param justification: Optional reason for the retraction
        :param bool save: Persist this node after attaching the retraction
        :raises NodeStateError: if the node is not a retractable registration
            or is not the root of its registration tree
        """
        if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.pending_embargo)):
            raise NodeStateError('Only public registrations or active embargoes may be retracted.')
        if self.root is not self:
            raise NodeStateError('Retraction of non-parent registrations is not permitted.')
        retraction = self._initiate_retraction(user, justification, save=True)
        # Log against the originating node so the trail survives retraction
        self.registered_from.add_log(
            action=NodeLog.RETRACTION_INITIATED,
            params={
                'node': self._id,
                'retraction_id': retraction._id,
            },
            auth=Auth(user),
        )
        self.retraction = retraction
        if save:
            self.save()
def _is_embargo_date_valid(self, end_date):
today = datetime.datetime.utcnow()
if (end_date - today) >= settings.EMBARGO_END_DATE_MIN:
if (end_date - today) <= settings.EMBARGO_END_DATE_MAX:
return True
return False
    def _initiate_embargo(self, user, end_date, for_existing_registration=False, save=False):
        """Create an Embargo record for this registration (does not attach it).

        :param user: User who initiated the embargo
        :param end_date: Date when the registration should be made public
        :param bool for_existing_registration: Whether this embargoes a
            registration that pre-dates the embargo feature
        :param bool save: Persist the new Embargo record
        :return: The Embargo instance
        """
        embargo = Embargo()
        embargo.initiated_by = user
        embargo.for_existing_registration = for_existing_registration
        # Convert Date to Datetime
        embargo.end_date = datetime.datetime.combine(end_date, datetime.datetime.min.time())
        # Only active admin contributors get approval tokens
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        embargo.approval_state = {
            admin._id: {
                'approval_token': security.random_string(30),
                'disapproval_token': security.random_string(30),
                'has_approved': False
            } for admin in admins
        }
        if save:
            embargo.save()
        return embargo
    def embargo_registration(self, user, end_date, for_existing_registration=False):
        """Enter registration into an embargo period at end of which, it will
        be made public

        :param user: User initiating the embargo
        :param end_date: Date when the registration should be made public
        :raises: NodeStateError if Node is not a registration
        :raises: PermissionsError if user is not an admin for the Node
        :raises: ValidationValueError if end_date is not within time constraints
        """
        if not self.is_registration:
            raise NodeStateError('Only registrations may be embargoed')
        if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins may embargo a registration')
        if not self._is_embargo_date_valid(end_date):
            raise ValidationValueError('Embargo end date must be more than one day in the future')
        embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration, save=True)
        self.registered_from.add_log(
            action=NodeLog.EMBARGO_INITIATED,
            params={
                'node': self._id,
                'embargo_id': embargo._id,
            },
            auth=Auth(user),
            save=True,
        )
        # Embargo record needs to be saved to ensure the forward reference Node->Embargo
        self.embargo = embargo
        # Embargoed registrations must stay private until the embargo ends
        if self.is_public:
            self.set_privacy('private', Auth(user))
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
    """Ensure that user IDs in `contributors` and `permissions` match.

    :param schema: Schema of the saved record (unused)
    :param instance: The Node about to be saved
    :raises ValidationValueError: when a contributor has no permissions entry,
        or a permissions key has no matching contributor
    """
    node = instance
    contributor_ids = set([user._id for user in node.contributors])
    permission_ids = set(node.permissions.keys())
    mismatched_contributors = contributor_ids.difference(permission_ids)
    if mismatched_contributors:
        raise ValidationValueError(
            'Contributors {0} missing from `permissions` on node {1}'.format(
                ', '.join(mismatched_contributors),
                node._id,
            )
        )
    mismatched_permissions = permission_ids.difference(contributor_ids)
    if mismatched_permissions:
        # BUG FIX: this message previously joined `mismatched_contributors`,
        # which is always empty on this branch, so the offending permission
        # keys were omitted from the error text.
        raise ValidationValueError(
            'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
                node._id,
            )
        )
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
    """Ensure that user IDs in `contributors` and `visible_contributor_ids`
    match.

    :param schema: Schema of the saved record (unused)
    :param instance: The Node about to be saved
    :raises ValidationValueError: when a visible id has no matching contributor
    """
    node = instance
    for user_id in node.visible_contributor_ids:
        # NOTE(review): compares a string id against a list of User records;
        # relies on the ODM's id-based `in` semantics -- confirm.
        if user_id not in node.contributors:
            raise ValidationValueError(
                ('User {0} is in `visible_contributor_ids` but not in '
                 '`contributors` on node {1}').format(
                    user_id,
                    node._id,
                )
            )
class WatchConfig(StoredObject):
    """Watch settings for a node: which node is watched and how updates
    are delivered (digest vs. immediate).
    """

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    # The node being watched
    node = fields.ForeignField('Node', backref='watched')
    # Deliver updates as part of a digest
    digest = fields.BooleanField(default=False)
    # Deliver updates immediately
    immediate = fields.BooleanField(default=False)

    def __repr__(self):
        return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
    """Shareable key granting view-only access to one or more nodes."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    # The secret token embedded in the shared URL
    key = fields.StringField(required=True)
    name = fields.StringField()
    is_deleted = fields.BooleanField(default=False)
    # When True, contributor identities are hidden from link viewers
    anonymous = fields.BooleanField(default=False)
    nodes = fields.ForeignField('node', list=True, backref='shared')
    creator = fields.ForeignField('user', backref='created')

    @property
    def node_ids(self):
        """Primary keys of all nodes shared through this link."""
        node_ids = [node._id for node in self.nodes]
        return node_ids

    def node_scale(self, node):
        """Pixel indentation used when rendering *node* in the link's tree view."""
        # node may be None if previous node's parent is deleted
        if node is None or node.parent_id not in self.node_ids:
            return -40
        else:
            offset = 20 if node.parent_node is not None else 0
            return offset + self.node_scale(node.parent_node)

    def to_json(self):
        """Serializable representation for the private-links UI."""
        return {
            "id": self._id,
            "date_created": iso8601format(self.date_created),
            "key": self.key,
            "name": self.name,
            "creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
            "nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
                      for x in self.nodes if not x.is_deleted],
            "anonymous": self.anonymous
        }
def validate_retraction_state(value):
    """Field validator: *value* must be a recognized Retraction state."""
    if value in (Retraction.PENDING, Retraction.RETRACTED, Retraction.CANCELLED):
        return True
    raise ValidationValueError('Invalid retraction state assignment.')
class Retraction(StoredObject):
    """Retraction object for public registrations.

    Holds per-admin approval/disapproval tokens; when every admin has
    approved, the parent registration is marked retracted, any embargo is
    cancelled, and the registration is forced public.
    """

    PENDING = 'pending'
    RETRACTED = 'retracted'
    CANCELLED = 'cancelled'

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    initiated_by = fields.ForeignField('user', backref='retracted')
    # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Cru7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'disapproval_token': 'UotzClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()
    # One of 'pending', 'retracted', or 'cancelled'
    state = fields.StringField(default='pending', validate=validate_retraction_state)

    def __repr__(self):
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        return ('<Retraction(parent_registration={0}, initiated_by={1}) '
                'with _id {2}>').format(
            parent_registration,
            self.initiated_by,
            self._id
        )

    @property
    def is_retracted(self):
        """Whether the retraction has been fully approved."""
        return self.state == self.RETRACTED

    @property
    def pending_retraction(self):
        """Whether the retraction is still awaiting admin approvals."""
        return self.state == self.PENDING

    def disapprove_retraction(self, user, token):
        """Cancels retraction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['disapproval_token'] != token:
                raise InvalidRetractionDisapprovalToken('Invalid retraction disapproval token provided.')
        except KeyError:
            # user._id absent from approval_state => not an admin
            raise PermissionsError('User must be an admin to disapprove retraction of a registration.')
        self.state = self.CANCELLED
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_CANCELLED,
            params={
                'node': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(user),
            save=True,
        )

    def approve_retraction(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidRetractionApprovalToken('Invalid retraction approval token provided.')
        except KeyError:
            # BUG FIX: this message previously said "disapprove" -- this is
            # the approval path.
            raise PermissionsError('User must be an admin to approve retraction of a registration.')
        self.approval_state[user._id]['has_approved'] = True
        # Retraction takes effect only when every admin has approved
        if all(val['has_approved'] for val in self.approval_state.values()):
            self.state = self.RETRACTED
            parent_registration = Node.find_one(Q('retraction', 'eq', self))
            parent_registration.registered_from.add_log(
                action=NodeLog.RETRACTION_APPROVED,
                params={
                    'node': parent_registration._id,
                    'retraction_id': self._id,
                },
                auth=Auth(user),
            )
            # Remove any embargoes associated with the registration
            if parent_registration.embargo_end_date or parent_registration.pending_embargo:
                parent_registration.embargo.state = self.CANCELLED
                parent_registration.registered_from.add_log(
                    action=NodeLog.EMBARGO_CANCELLED,
                    params={
                        'node': parent_registration._id,
                        'embargo_id': parent_registration.embargo._id,
                    },
                    auth=Auth(user),
                )
                parent_registration.embargo.save()
            # Ensure retracted registration is public
            if not parent_registration.is_public:
                parent_registration.set_privacy('public')
            parent_registration.update_search()
            # Retraction status is inherited from the root project, so we
            # need to recursively update search for every descendant node
            # so that retracted subprojects/components don't appear in search
            for node in parent_registration.get_descendants_recursive():
                node.update_search()
def validate_embargo_state(value):
    """Field validator: *value* must be a recognized Embargo state."""
    if value in (Embargo.UNAPPROVED, Embargo.ACTIVE, Embargo.CANCELLED, Embargo.COMPLETED):
        return True
    raise ValidationValueError('Invalid embargo state assignment.')
class Embargo(StoredObject):
    """Embargo object for registrations waiting to go public."""
    # Lifecycle states; transitions are validated by validate_embargo_state.
    UNAPPROVED = 'unapproved'
    ACTIVE = 'active'
    CANCELLED = 'cancelled'
    COMPLETED = 'completed'
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    # When the embargo was initiated (set automatically on creation).
    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    initiated_by = fields.ForeignField('user', backref='embargoed')
    # Date the embargo lifts and the registration becomes public.
    end_date = fields.DateTimeField()
    # Expanded: Dictionary field mapping admin IDs to their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'disapproval_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()
    # One of 'unapproved', 'active', 'cancelled', or 'completed'
    state = fields.StringField(default='unapproved', validate=validate_embargo_state)
    # True when the embargo was added to an already-existing registration,
    # as opposed to one created together with the embargo.
    for_existing_registration = fields.BooleanField(default=False)
def __repr__(self):
    """Debug representation, including the embargoed parent registration."""
    registration = Node.find_one(Q('embargo', 'eq', self))
    template = ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>')
    return template.format(
        registration, self.initiated_by, self.end_date, self._id)
@property
def embargo_end_date(self):
    """Scheduled lift date while the embargo is active; False otherwise."""
    if self.state != Embargo.ACTIVE:
        return False
    return self.end_date
@property
def pending_embargo(self):
    """Whether the embargo is still awaiting unanimous admin approval."""
    current_state = self.state
    return current_state == Embargo.UNAPPROVED
# NOTE(hrybacki): Old, private registrations are grandfathered and do not
# require to be made public or embargoed. This field differentiates them
# from new registrations entering into an embargo field which should not
# show up in any search related fields.
@property
def pending_registration(self):
    """Whether this is a brand-new registration still pending its embargo."""
    if self.for_existing_registration:
        return False
    return self.pending_embargo
def disapprove_embargo(self, user, token):
    """Cancel the embargo if ``user`` is an admin and ``token`` verifies."""
    try:
        expected_token = self.approval_state[user._id]['disapproval_token']
    except KeyError:
        # Only admins have an entry in approval_state.
        raise PermissionsError('User must be an admin to disapprove embargoing of a registration.')
    if expected_token != token:
        raise InvalidEmbargoDisapprovalToken('Invalid embargo disapproval token provided.')
    self.state = Embargo.CANCELLED
    parent_registration = Node.find_one(Q('embargo', 'eq', self))
    parent_registration.registered_from.add_log(
        action=NodeLog.EMBARGO_CANCELLED,
        params={
            'node': parent_registration._id,
            'embargo_id': self._id,
        },
        auth=Auth(user),
    )
    # A registration created only for this embargo has no life of its own:
    # drop the backref to the parent project and mark it deleted.
    if not self.for_existing_registration:
        parent_registration.registered_from = None
        parent_registration.is_deleted = True
    parent_registration.save()
def approve_embargo(self, user, token):
    """Add user to approval list if user is admin and token verifies."""
    try:
        if self.approval_state[user._id]['approval_token'] != token:
            raise InvalidEmbargoApprovalToken('Invalid embargo approval token provided.')
    except KeyError:
        # Only admins have an entry in approval_state; a missing key means
        # the user is not an admin.
        # NOTE(review): message says "disapprove" but this is the approval
        # path — looks like copy/paste; left unchanged here.
        raise PermissionsError('User must be an admin to disapprove embargoing of a registration.')
    self.approval_state[user._id]['has_approved'] = True
    # Embargo activation requires unanimous approval from all admins.
    if all(val['has_approved'] for val in self.approval_state.values()):
        self.state = Embargo.ACTIVE
        parent_registration = Node.find_one(Q('embargo', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
# coding=utf-8
from __future__ import unicode_literals
from cio.conf import settings
class BasePlugin(object):
    """Base class for content plugins, keyed by a file extension."""

    # Extension handled by this plugin; subclasses must override.
    ext = None

    @property
    def settings(self):
        """Plugin-specific settings, looked up by upper-cased extension."""
        key = self.ext.upper()
        return settings.get(key, {})

    def load(self, content):
        """Return plugin data for a content string (identity by default)."""
        return content

    def load_node(self, node):
        """Return plugin data for a raw node by loading its content."""
        return self.load(node.content)

    def save(self, data):
        """Persist external plugin resources; return the content string."""
        return data

    def save_node(self, node):
        """Run ``save`` over the node's content and return the node."""
        node.content = self.save(node.content)
        return node

    def publish_node(self, node):
        """Hook invoked on publish; return the node to persist."""
        return node

    def delete(self, data):
        """Delete external plugin resources (no-op by default)."""
        return None

    def delete_node(self, node):
        """Delete external plugin resources for a node's content."""
        self.delete(node.content)

    def render(self, data):
        """Render plugin data (identity by default)."""
        return data

    def render_node(self, node, data):
        """Prepare the node for render and return the rendered content."""
        return self.render(data)
"""SCons.Conftest
Autoconf-like configuration support; low level implementation of tests.
"""
#
# Copyright (c) 2003 Stichting NLnet Labs
# Copyright (c) 2001, 2002, 2003 Steven Knight
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# The purpose of this module is to define how a check is to be performed.
# Use one of the Check...() functions below.
#
#
# A context class is used that defines functions for carrying out the tests,
# logging and messages. The following methods and members must be present:
#
# context.Display(msg) Function called to print messages that are normally
# displayed for the user. Newlines are explicitly used.
# The text should also be written to the logfile!
#
# context.Log(msg) Function called to write to a log file.
#
# context.BuildProg(text, ext)
# Function called to build a program, using "ext" for the
# file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results building should be done just
# like an actual program would be build, using the same
# command and arguments (including configure results so
# far).
#
# context.CompileProg(text, ext)
# Function called to compile a program, using "ext" for
# the file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results compiling should be done just
# like an actual source file would be compiled, using the
# same command and arguments (including configure results
# so far).
#
# context.AppendLIBS(lib_name_list)
# Append "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.PrependLIBS(lib_name_list)
# Prepend "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.SetLIBS(value)
# Set LIBS to "value". The type of "value" is what
# AppendLIBS() returned.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.headerfilename
# Name of file to append configure results to, usually
# "confdefs.h".
# The file must not exist or be empty when starting.
# Empty or None to skip this (some tests will not work!).
#
# context.config_h (may be missing). If present, must be a string, which
# will be filled with the contents of a config_h file.
#
# context.vardict Dictionary holding variables used for the tests and
# stores results from the tests, used for the build
# commands.
# Normally contains "CC", "LIBS", "CPPFLAGS", etc.
#
# context.havedict Dictionary holding results from the tests that are to
# be used inside a program.
# Names often start with "HAVE_". These are zero
# (feature not present) or one (feature present). Other
# variables may have any value, e.g., "PERLVERSION" can
# be a number and "SYSTEMNAME" a string.
#
import re
from types import IntType
#
# PUBLIC VARIABLES
#
# Set to a true value to log the input files in case of a failed test.
LogInputFiles = 1
# Set to a true value to log Conftest-generated error messages.
LogErrorMessages = 1
#
# PUBLIC FUNCTIONS
#
# Generic remarks:
# - When a language is specified which is not supported the test fails. The
# message is a bit different, because not all the arguments for the normal
# message are available yet (chicken-egg problem).
def CheckBuilder(context, text = None, language = None):
    """
    Configure check to see if the compiler works.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    "text" may be used to specify the code to be build.
    Returns an empty string for success, an error message for failure.
    """
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        # Unsupported language: report and return the error message.
        context.Display("%s\n" % msg)
        return msg
    if not text:
        # Minimal translation unit; building (compile + link) it exercises
        # the whole toolchain with the current flags.
        text = """
int main() {
    return 0;
}
"""
    context.Display("Checking if building a %s file works... " % lang)
    ret = context.BuildProg(text, suffix)
    _YesNoResult(context, ret, None, text)
    return ret
def CheckCC(context):
    """
    Configure check for a working C compiler.
    This checks whether the C compiler, as defined in the $CC construction
    variable, can compile a C source file. It uses the current $CCCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the C compiler works")
    text = """
int main()
{
    return 0;
}
"""
    # Compile-only check (no link); returns 0/empty on success.
    ret = _check_empty_program(context, 'CC', text, 'C')
    _YesNoResult(context, ret, None, text)
    return ret
def CheckSHCC(context):
    """
    Configure check for a working shared C compiler.
    This checks whether the C compiler, as defined in the $SHCC construction
    variable, can compile a C source file. It uses the current $SHCCCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the (shared) C compiler works")
    # A non-main function: a shared object does not need an entry point.
    text = """
int foo()
{
    return 0;
}
"""
    ret = _check_empty_program(context, 'SHCC', text, 'C', use_shared = True)
    _YesNoResult(context, ret, None, text)
    return ret
def CheckCXX(context):
    """
    Configure check for a working CXX compiler.
    This checks whether the CXX compiler, as defined in the $CXX construction
    variable, can compile a CXX source file. It uses the current $CXXCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the C++ compiler works")
    text = """
int main()
{
    return 0;
}
"""
    # Compile-only check (no link); returns 0/empty on success.
    ret = _check_empty_program(context, 'CXX', text, 'C++')
    _YesNoResult(context, ret, None, text)
    return ret
def CheckSHCXX(context):
    """
    Configure check for a working shared CXX compiler.
    This checks whether the CXX compiler, as defined in the $SHCXX construction
    variable, can compile a CXX source file. It uses the current $SHCXXCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the (shared) C++ compiler works")
    text = """
int main()
{
    return 0;
}
"""
    ret = _check_empty_program(context, 'SHCXX', text, 'C++', use_shared = True)
    _YesNoResult(context, ret, None, text)
    return ret
def _check_empty_program(context, comp, text, language, use_shared = False):
    """Compile ``text`` using the compiler named by construction variable
    ``comp``. Return 0 on success, 1 otherwise."""
    # An unset or empty compiler variable means the tool is unavailable.
    if comp not in context.env:
        return 1
    if not context.env[comp]:
        return 1
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        # Unsupported language.
        return 1
    if use_shared:
        return context.CompileSharedObject(text, suffix)
    return context.CompileProg(text, suffix)
def CheckFunc(context, function_name, header = None, language = None):
    """
    Configure check for a function "function_name".
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Optional "header" can be defined to define a function prototype, include a
    header file or anything else that comes before main().
    Sets HAVE_function_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Remarks from autoconf:
    # - Don't include <ctype.h> because on OSF/1 3.0 it includes <sys/types.h>
    #   which includes <sys/select.h> which contains a prototype for select.
    #   Similarly for bzero.
    # - assert.h is included to define __stub macros and hopefully few
    #   prototypes, which can conflict with char $1(); below.
    # - Override any gcc2 internal prototype to avoid an error.
    # - We use char for the function declaration because int might match the
    #   return type of a gcc2 builtin and then its argument prototype would
    #   still apply.
    # - The GNU C library defines this for functions which it implements to
    #   always fail with ENOSYS. Some functions are actually named something
    #   starting with __ and the normal name is an alias.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        # Default prototype; char avoids clashing with gcc2 builtins.
        header = """
#ifdef __cplusplus
extern "C"
#endif
char %s();""" % function_name
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s(): %s\n" % (function_name, msg))
        return msg
    # Build a program that calls the function (unless glibc stubs it out),
    # so that success requires the symbol to actually link.
    text = """
%(include)s
#include <assert.h>
%(hdr)s
int main() {
#if defined (__stub_%(name)s) || defined (__stub___%(name)s)
fail fail fail
#else
%(name)s();
#endif
return 0;
}
""" % { 'name': function_name,
        'include': includetext,
        'hdr': header }
    context.Display("Checking for %s function %s()... " % (lang, function_name))
    ret = context.BuildProg(text, suffix)
    _YesNoResult(context, ret, "HAVE_" + function_name, text,
                 "Define to 1 if the system has the function `%s'." %\
                 function_name)
    return ret
def CheckHeader(context, header_name, header = None, language = None,
                include_quotes = None):
    """
    Configure check for a C or C++ header file "header_name".
    Optional "header" can be defined to do something before including the
    header file (unusual, supported for consistency).
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Sets HAVE_header_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS and $CPPFLAGS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Why compile the program instead of just running the preprocessor?
    # It is possible that the header file exists, but actually using it may
    # fail (e.g., because it depends on other header files). Thus this test is
    # more strict. It may require using the "header" argument.
    #
    # Use <> by default, because the check is normally used for system header
    # files. SCons passes '""' to overrule this.
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"\n' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for header file %s: %s\n"
                        % (header_name, msg))
        return msg
    if not include_quotes:
        include_quotes = "<>"
    # include_quotes is a 2-char string: opening and closing delimiter.
    text = "%s%s\n#include %s%s%s\n\n" % (includetext, header,
        include_quotes[0], header_name, include_quotes[1])
    context.Display("Checking for %s header file %s... " % (lang, header_name))
    ret = context.CompileProg(text, suffix)
    _YesNoResult(context, ret, "HAVE_" + header_name, text,
                 "Define to 1 if you have the <%s> header file." % header_name)
    return ret
def CheckType(context, type_name, fallback = None,
              header = None, language = None):
    """
    Configure check for a C or C++ type "type_name".
    Optional "header" can be defined to include a header file.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Optional "fallback" is a type name written as a typedef for "type_name"
    into the configure header when the check fails.
    Sets HAVE_type_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
        return msg
    # Remarks from autoconf about this test:
    # - Grepping for the type in include files is not reliable (grep isn't
    #   portable anyway).
    # - Using "TYPE my_var;" doesn't work for const qualified types in C++.
    #   Adding an initializer is not valid for some C++ classes.
    # - Using the type as parameter to a function either fails for K&R C or
    #   for C++.
    # - Using "TYPE *my_var;" is valid in C for some types that are not
    #   declared (struct something).
    # - Using "sizeof(TYPE)" is valid when TYPE is actually a variable.
    # - Using the previous two together works reliably.
    text = """
%(include)s
%(header)s
int main() {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % { 'include': includetext,
        'header': header,
        'name': type_name }
    context.Display("Checking for %s type %s... " % (lang, type_name))
    ret = context.BuildProg(text, suffix)
    _YesNoResult(context, ret, "HAVE_" + type_name, text,
                 "Define to 1 if the system has the type `%s'." % type_name)
    # On failure, optionally define the type as a typedef of "fallback".
    if ret and fallback and context.headerfilename:
        f = open(context.headerfilename, "a")
        f.write("typedef %s %s;\n" % (fallback, type_name))
        f.close()
    return ret
def CheckTypeSize(context, type_name, header = None, language = None, expect = None):
    """This check can be used to get the size of a given type, or to check whether
    the type is of expected size.

    Arguments:
        - type : str
            the type to check
        - includes : sequence
            list of headers to include in the test code before testing the type
        - language : str
            'C' or 'C++'
        - expect : int
            if given, will test whether the type has the given number of bytes.
            If not given, will automatically find the size.

        Returns:
            status : int
                0 if the check failed, or the found size of the type if the check succeeded."""
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
        return msg
    src = includetext + header
    if not expect is None:
        # Only check if the given size is the right one
        context.Display('Checking %s is %d bytes... ' % (type_name, expect))
        # test code taken from autoconf: this is a pretty clever hack to find that
        # a type is of a given size using only compilation. This speeds things up
        # quite a bit compared to straightforward code using TryRun
        # (the array has negative size — a compile error — iff sizeof differs).
        src = src + r"""
typedef %s scons_check_type;
int main()
{
    static int test_array[1 - 2 * !(((long int) (sizeof(scons_check_type))) == %d)];
    test_array[0] = 0;
    return 0;
}
"""
        st = context.CompileProg(src % (type_name, expect), suffix)
        if not st:
            context.Display("yes\n")
            _Have(context, "SIZEOF_%s" % type_name, expect,
                  "The size of `%s', as computed by sizeof." % type_name)
            return expect
        else:
            context.Display("no\n")
            _LogFailed(context, src, st)
            return 0
    else:
        # Determine the size by running a program that prints sizeof(type).
        # NOTE(review): this branch uses context.Message while the rest of the
        # module uses context.Display — presumably equivalent; confirm.
        context.Message('Checking size of %s ... ' % type_name)
        # We have to be careful with the program we wish to test here since
        # compilation will be attempted using the current environment's flags.
        # So make sure that the program will compile without any warning. For
        # example using: 'int main(int argc, char** argv)' will fail with the
        # '-Wall -Werror' flags since the variables argc and argv would not be
        # used in the program...
        #
        src = src + """
#include <stdlib.h>
#include <stdio.h>
int main() {
    printf("%d", (int)sizeof(""" + type_name + """));
    return 0;
}
"""
        st, out = context.RunProg(src, suffix)
        try:
            size = int(out)
        except ValueError:
            # If cannot convert output of test prog to an integer (the size),
            # something went wrong, so just fail
            st = 1
            size = 0
        if not st:
            context.Display("yes\n")
            _Have(context, "SIZEOF_%s" % type_name, size,
                  "The size of `%s', as computed by sizeof." % type_name)
            return size
        else:
            context.Display("no\n")
            _LogFailed(context, src, st)
            return 0
    # Unreachable: both branches above return.
    return 0
def CheckDeclaration(context, symbol, includes = None, language = None):
    """Checks whether symbol is declared.

    Use the same test as autoconf, that is test whether the symbol is defined
    as a macro or can be used as an r-value.

    Arguments:
        symbol : str
            the symbol to check
        includes : str
            Optional "header" can be defined to include a header file.
        language : str
            only C and C++ supported.

    Returns:
        status : bool
            True if the check failed, False if succeeded."""
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not includes:
        includes = ""
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for declaration %s: %s\n" % (symbol, msg))
        return msg
    src = includetext + includes
    context.Display('Checking whether %s is declared... ' % symbol)
    # If the symbol is a macro the #ifndef skips the use; otherwise the
    # (void) cast requires it to be a valid r-value declaration.
    src = src + r"""
int main()
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}
""" % (symbol, symbol)
    st = context.CompileProg(src, suffix)
    _YesNoResult(context, st, "HAVE_DECL_" + symbol, src,
                 "Set to 1 if %s is defined." % symbol)
    return st
def CheckLib(context, libs, func_name = None, header = None,
             extra_libs = None, call = None, language = None, autoadd = 1,
             append = True):
    """
    Configure check for a C or C++ libraries "libs". Searches through
    the list of libraries, until one is found where the test succeeds.
    Tests if "func_name" or "call" exists in the library. Note: if it exists
    in another library the test succeeds anyway!
    Optional "header" can be defined to include a header file. If not given a
    default prototype for "func_name" is added.
    Optional "extra_libs" is a list of library names to be added after
    "lib_name" in the build command. To be used for libraries that "lib_name"
    depends on.
    Optional "call" replaces the call to "func_name" in the test code. It must
    consist of complete C statements, including a trailing ";".
    Both "func_name" and "call" arguments are optional, and in that case, just
    linking against the libs is tested.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""
    text = """
%s
%s""" % (includetext, header)

    # Add a function declaration if needed.
    if func_name and func_name != "main":
        if not header:
            text = text + """
#ifdef __cplusplus
extern "C"
#endif
char %s();
""" % func_name
        # BUG FIX: only synthesize the call when a function name was given.
        # Previously this ran unconditionally, so func_name=None produced
        # the invalid test statement 'None();'.
        if not call:
            call = "%s();" % func_name

    # The actual test code.
    # if no function to test, leave main() blank
    text = text + """
int
main() {
%s
return 0;
}
""" % (call or "")

    # Abbreviated form of the call used in progress messages.
    if call:
        i = call.find("\n")
        if i > 0:
            calltext = call[:i] + ".."
        elif call[-1] == ';':
            calltext = call[:-1]
        else:
            calltext = call

    for lib_name in libs:
        lang, suffix, msg = _lang2suffix(language)
        if msg:
            context.Display("Cannot check for library %s: %s\n" % (lib_name, msg))
            return msg
        # if a function was specified to run in main(), say it
        if call:
            context.Display("Checking for %s in %s library %s... "
                            % (calltext, lang, lib_name))
        # otherwise, just say the name of library and language
        else:
            context.Display("Checking for %s library %s... "
                            % (lang, lib_name))
        if lib_name:
            lib_list = [ lib_name ]
            if extra_libs:
                lib_list.extend(extra_libs)
            if append:
                oldLIBS = context.AppendLIBS(lib_list)
            else:
                oldLIBS = context.PrependLIBS(lib_list)
            sym = "HAVE_LIB" + lib_name
        else:
            # A None/empty entry tests linking with the current LIBS only.
            oldLIBS = -1
            sym = None
        ret = context.BuildProg(text, suffix)
        _YesNoResult(context, ret, sym, text,
                     "Define to 1 if you have the `%s' library." % lib_name)
        # Restore LIBS on failure, or when the caller asked not to keep it.
        if oldLIBS != -1 and (ret or not autoadd):
            context.SetLIBS(oldLIBS)
        if not ret:
            return ret
    return ret
#
# END OF PUBLIC FUNCTIONS
#
def _YesNoResult(context, ret, key, text, comment = None):
    """
    Handle the result of a test with a "yes" or "no" result.
    "ret" is the return value: empty if OK, error message when not.
    "key" is the name of the symbol to be defined (HAVE_foo).
    "text" is the source code of the program used for testing.
    "comment" is the C comment to add above the line defining the symbol (the
    comment is automatically put inside a /* */). If None, no comment is added.
    """
    if key:
        # Record the outcome (1 = present, 0 = absent) in the config header.
        _Have(context, key, not ret, comment)
    if not ret:
        context.Display("yes\n")
        return
    context.Display("no\n")
    _LogFailed(context, text, ret)
def _Have(context, key, have, comment = None):
    """
    Store result of a test in context.havedict and context.headerfilename.
    "key" is a "HAVE_abc" name. It is turned into all CAPITALS and non-
    alphanumerics are replaced by an underscore.
    The value of "have" can be:
        1      - Feature is defined, add "#define key".
        0      - Feature is not defined, add "/* #undef key */".
                 Adding "undef" is what autoconf does. Not useful for the
                 compiler, but it shows that the test was done.
        number - Feature is defined to this number "#define key have".
                 Doesn't work for 0 or 1, use a string then.
        string - Feature is defined to this string "#define key have".
                 Give "have" as is should appear in the header file, include quotes
                 when desired and escape special characters!
    """
    # Normalize the key into a valid C macro name.
    key_up = key.upper()
    key_up = re.sub('[^A-Z0-9_]', '_', key_up)
    context.havedict[key_up] = have
    if have == 1:
        line = "#define %s 1\n" % key_up
    elif have == 0:
        line = "/* #undef %s */\n" % key_up
    elif isinstance(have, IntType):
        # NOTE(review): IntType comes from the Python 2 `types` module; this
        # branch covers plain ints other than 0/1.
        line = "#define %s %d\n" % (key_up, have)
    else:
        line = "#define %s %s\n" % (key_up, str(have))
    if comment is not None:
        lines = "\n/* %s */\n" % comment + line
    else:
        lines = "\n" + line
    # Append to the configure header if one is in use, otherwise accumulate
    # in the in-memory config_h string (if the context provides one).
    if context.headerfilename:
        f = open(context.headerfilename, "a")
        f.write(lines)
        f.close()
    elif hasattr(context,'config_h'):
        context.config_h = context.config_h + lines
def _LogFailed(context, text, msg):
    """
    Write to the log about a failed program.
    Add line numbers, so that error messages can be understood.
    """
    if LogInputFiles:
        context.Log("Failed program was:\n")
        source_lines = text.split('\n')
        if source_lines and source_lines[-1] == '':
            # Drop the trailing empty line produced by a final newline.
            del source_lines[-1]
        for lineno, line in enumerate(source_lines, 1):
            context.Log("%d: %s\n" % (lineno, line))
    if LogErrorMessages:
        context.Log("Error message: %s\n" % msg)
def _lang2suffix(lang):
"""
Convert a language name to a suffix.
When "lang" is empty or None C is assumed.
Returns a tuple (lang, suffix, None) when it works.
For an unrecognized language returns (None, None, msg).
Where:
lang = the unified language name
suffix = the suffix, including the leading dot
msg = an error message
"""
if not lang or lang in ["C", "c"]:
return ("C", ".c", None)
if lang in ["c++", "C++", "cpp", "CXX", "cxx"]:
return ("C++", ".cpp", None)
return None, None, "Unsupported language: %s" % lang
# vim: set sw=4 et sts=4 tw=79 fo+=l:
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
Compact Backtrace Format
========================
We would like to be able to efficiently store and access backtraces,
but we also wish to minimise the memory used to store them. Since
backtraces typically contain a good deal of redundancy, it should be
possible to compress the data.
Compact Backtrace Format (CBF) is a binary format for holding a
backtrace; this specification addresses only the storage of the actual
stack backtrace, and it does not consider storage of ancillary data
(register contents, image lists and so on). Those will be dealt with
separately elsewhere.
## General Format
Compact Backtrace Format data is byte aligned and starts with an
information byte:
~~~
7 6 5 4 3 2 1 0
┌───────────────────────┬───────┐
│ version │ size │
└───────────────────────┴───────┘
~~~
The `version` field identifies the version of CBF that is in use; this
document describes version `0`. The `size` field is encoded as
follows:
| `size` | Machine word size |
| :----: | :---------------- |
| 00 | 16-bit |
| 01 | 32-bit |
| 10 | 64-bit |
| 11 | Reserved |
This is followed by a series of instructions that tell the reader how
to decode subsequent data.
The first instruction that computes an address _must_ specify an
absolute address (the `a` bit must be set).
## Instructions
The following instructions are currently defined
| `opcode` | Mnemonic | Meaning |
| :--------: | :------- | :---------------------------------------- |
| `00000000` | `end` | Marks the end of the backtrace |
| `00000001` | `trunc` | As above, but the backtrace was truncated |
| `0000xxxx` | reserved | Reserved for future expansion |
| `0001axxx` | `pc` | A program counter value follows |
| `0010axxx` | `ra` | A return address value follows |
| `0011axxx` | `async` | An async resume point follows |
| `01xxxxxx` | `omit` | Indicates frames have been omitted |
| `1000xxxx` | `rep` | Repeat the previous frame |
| `1xxxxxxx` | reserved | Reserved for future expansion |
If the bit labelled `a` is set, it means that the address computation
is absolute rather than being relative to the previously computed
address.
### `end`/`trunc`
#### Encoding
~~~
7 6 5 4 3 2 1 0
┌───────────────────────────┬───┐
│ 0 0 0 0 0 0 0 │ t │ end (or trunc if t is 1)
└───────────────────────────┴───┘
~~~
#### Meaning
Marks the end of the backtrace data. If `t` is set, it indicates that
the backtrace was truncated at this point (for instance because we hit
a frame limit while capturing).
It is not strictly necessary to use the `end` instruction if the
CBF data is of a known length.
### `pc`, `ra`, `async`
#### Encoding
~~~
7 6 5 4 3 2 1 0
┌────────────────┬───┬──────────┐
│ 0 0 0 1 │ a │ count │ pc
└────────────────┴───┴──────────┘
┌────────────────┬───┬──────────┐
│ 0 0 1 0 │ a │ count │ ra
└────────────────┴───┴──────────┘
┌────────────────┬───┬──────────┐
│ 0 0 1 1 │ a │ count │ async
└────────────────┴───┴──────────┘
~~~
#### Meaning
Each of these instructions represents a frame on the stack. For `pc`
frames, the computed address is an actual program counter (aka
instruction pointer) value. `ra` instructions instead represent a
_return address_, the difference being that the program has not yet
executed that instruction. `async` instructions point at the entry
point of an async resume function, and are used when walking stacks on
systems that support `async`/`await` primitives that are implemented
by function splitting (typically an `async` instruction will point at
the start of a function containing the code immediately following an
`await`).
The next `count + 1` bytes following the instruction are an address
value. If `a` is set, the computed address is equal to the address
value. If `a` is not set, the computed address is equal to the
preceding computed address *plus* the address value.
Address values are sign-extended to the machine word width before
processing. Thus a single address byte with value `0xff` on a 32-bit
backtrace represents the address value `0xffffffff`.
### `omit`
#### Encoding
~~~
7 6 5 4 3 2 1 0
┌───────┬───┬───────────────────┐
│ 0 1 │ x │ count │ omit
└───────┴───┴───────────────────┘
~~~
#### Meaning
Indicates that a number of frames were skipped when capturing the
backtrace. This is used to allow a backtrace to include both the top
and bottom of the stack, without carrying every intervening frame, and
is useful to prevent the data from exploding where recursion has taken
place.
If `x` is `1`, the instruction is followed by `count + 1` bytes (up to the
machine word length) that are zero-extended to machine word length and
that represent a count of the number of frames that were omitted.
If `x` is `0`, `count + 1` is the number of frames that were omitted.
### `rep`
#### Encoding
~~~
7 6 5 4 3 2 1 0
┌────────────────┬───┬──────────┐
│ 1 0 0 0 │ x │ count │ repeat
└────────────────┴───┴──────────┘
~~~
#### Meaning
Repeat the previous frame.
If `x` is `1`, the instruction is followed by `count + 1` bytes that are zero
extended to machine word length and that represent a count of the number of
times to repeat the preceding frame.
If `x` is `0`, the previous frame should be repeated `count + 1` times. | unknown | github | https://github.com/apple/swift | docs/CompactBacktraceFormat.md |
use std::ops::ControlFlow;
use either::Either;
use itertools::Itertools as _;
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{Diag, Subdiagnostic};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{self, ConstraintCategory, Location};
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
};
use rustc_span::Span;
use rustc_trait_selection::error_reporting::infer::region::unexpected_hidden_region_diagnostic;
use rustc_trait_selection::errors::impl_trait_overcapture_suggestion;
use crate::MirBorrowckCtxt;
use crate::borrow_set::BorrowData;
use crate::consumers::RegionInferenceContext;
use crate::region_infer::opaque_types::DeferredOpaqueTypeError;
use crate::type_check::Locations;
impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
pub(crate) fn report_opaque_type_errors(&mut self, errors: Vec<DeferredOpaqueTypeError<'tcx>>) {
if errors.is_empty() {
return;
}
let infcx = self.infcx;
let mut guar = None;
let mut last_unexpected_hidden_region: Option<(Span, Ty<'_>, ty::OpaqueTypeKey<'tcx>)> =
None;
for error in errors {
guar = Some(match error {
DeferredOpaqueTypeError::InvalidOpaqueTypeArgs(err) => err.report(infcx),
DeferredOpaqueTypeError::LifetimeMismatchOpaqueParam(err) => {
infcx.dcx().emit_err(err)
}
DeferredOpaqueTypeError::UnexpectedHiddenRegion {
opaque_type_key,
hidden_type,
member_region,
} => {
let named_ty =
self.regioncx.name_regions_for_member_constraint(infcx.tcx, hidden_type.ty);
let named_key = self
.regioncx
.name_regions_for_member_constraint(infcx.tcx, opaque_type_key);
let named_region =
self.regioncx.name_regions_for_member_constraint(infcx.tcx, member_region);
let diag = unexpected_hidden_region_diagnostic(
infcx,
self.mir_def_id(),
hidden_type.span,
named_ty,
named_region,
named_key,
);
if last_unexpected_hidden_region
!= Some((hidden_type.span, named_ty, named_key))
{
last_unexpected_hidden_region =
Some((hidden_type.span, named_ty, named_key));
diag.emit()
} else {
diag.delay_as_bug()
}
}
DeferredOpaqueTypeError::NonDefiningUseInDefiningScope {
span,
opaque_type_key,
} => infcx.dcx().span_err(
span,
format!(
"non-defining use of `{}` in the defining scope",
Ty::new_opaque(
infcx.tcx,
opaque_type_key.def_id.to_def_id(),
opaque_type_key.args
)
),
),
});
}
let guar = guar.unwrap();
self.root_cx.set_tainted_by_errors(guar);
self.infcx.set_tainted_by_errors(guar);
}
/// Try to note when an opaque is involved in a borrowck error and that
/// opaque captures lifetimes due to edition 2024.
// FIXME: This code is otherwise somewhat general, and could easily be adapted
// to explain why other things overcapture... like async fn and RPITITs.
pub(crate) fn note_due_to_edition_2024_opaque_capture_rules(
&self,
borrow: &BorrowData<'tcx>,
diag: &mut Diag<'_>,
) {
// We look at all the locals. Why locals? Because it's the best thing
// I could think of that's correlated with the *instantiated* higher-ranked
// binder for calls, since we don't really store those anywhere else.
for ty in self.body.local_decls.iter().map(|local| local.ty) {
if !ty.has_opaque_types() {
continue;
}
let tcx = self.infcx.tcx;
let ControlFlow::Break((opaque_def_id, offending_region_idx, location)) = ty
.visit_with(&mut FindOpaqueRegion {
regioncx: &self.regioncx,
tcx,
borrow_region: borrow.region,
})
else {
continue;
};
// If an opaque explicitly captures a lifetime, then no need to point it out.
// FIXME: We should be using a better heuristic for `use<>`.
if tcx.rendered_precise_capturing_args(opaque_def_id).is_some() {
continue;
}
// If one of the opaque's bounds mentions the region, then no need to
// point it out, since it would've been captured on edition 2021 as well.
//
// Also, while we're at it, collect all the lifetimes that the opaque
// *does* mention. We'll use that for the `+ use<'a>` suggestion below.
let mut visitor = CheckExplicitRegionMentionAndCollectGenerics {
tcx,
generics: tcx.generics_of(opaque_def_id),
offending_region_idx,
seen_opaques: [opaque_def_id].into_iter().collect(),
seen_lifetimes: Default::default(),
};
if tcx
.explicit_item_bounds(opaque_def_id)
.skip_binder()
.visit_with(&mut visitor)
.is_break()
{
continue;
}
// If we successfully located a terminator, then point it out
// and provide a suggestion if it's local.
match self.body.stmt_at(location) {
Either::Right(mir::Terminator { source_info, .. }) => {
diag.span_note(
source_info.span,
"this call may capture more lifetimes than intended, \
because Rust 2024 has adjusted the `impl Trait` lifetime capture rules",
);
let mut captured_args = visitor.seen_lifetimes;
// Add in all of the type and const params, too.
// Ordering here is kinda strange b/c we're walking backwards,
// but we're trying to provide *a* suggestion, not a nice one.
let mut next_generics = Some(visitor.generics);
let mut any_synthetic = false;
while let Some(generics) = next_generics {
for param in &generics.own_params {
if param.kind.is_ty_or_const() {
captured_args.insert(param.def_id);
}
if param.kind.is_synthetic() {
any_synthetic = true;
}
}
next_generics = generics.parent.map(|def_id| tcx.generics_of(def_id));
}
if let Some(opaque_def_id) = opaque_def_id.as_local()
&& let hir::OpaqueTyOrigin::FnReturn { parent, .. } =
tcx.hir_expect_opaque_ty(opaque_def_id).origin
{
if let Some(sugg) = impl_trait_overcapture_suggestion(
tcx,
opaque_def_id,
parent,
captured_args,
) {
sugg.add_to_diag(diag);
}
} else {
diag.span_help(
tcx.def_span(opaque_def_id),
format!(
"if you can modify this crate, add a precise \
capturing bound to avoid overcapturing: `+ use<{}>`",
if any_synthetic {
"/* Args */".to_string()
} else {
captured_args
.into_iter()
.map(|def_id| tcx.item_name(def_id))
.join(", ")
}
),
);
}
return;
}
Either::Left(_) => {}
}
}
}
}
/// This visitor contains the bulk of the logic for this lint.
struct FindOpaqueRegion<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    // Region inference results; used to walk outlives-constraint paths.
    regioncx: &'a RegionInferenceContext<'tcx>,
    // The region of the borrow involved in the borrowck error being explained.
    borrow_region: ty::RegionVid,
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for FindOpaqueRegion<'_, 'tcx> {
    // Break payload: (opaque def-id, index of the captured generic arg that
    // carries the borrow region, location of the call that defines the opaque).
    type Result = ControlFlow<(DefId, usize, Location), ()>;
    fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
        // If we find an opaque in a local ty, then for each of its captured regions,
        // try to find a path between that captured regions and our borrow region...
        if let ty::Alias(ty::Opaque, opaque) = *ty.kind()
            // Only plain RPIT (not in a trait or impl) is of interest here.
            && let hir::OpaqueTyOrigin::FnReturn { parent, in_trait_or_impl: None } =
                self.tcx.opaque_ty_origin(opaque.def_id)
        {
            let variances = self.tcx.variances_of(opaque.def_id);
            for (idx, (arg, variance)) in std::iter::zip(opaque.args, variances).enumerate() {
                // Skip uncaptured args.
                if *variance == ty::Bivariant {
                    continue;
                }
                // We only care about regions.
                let Some(opaque_region) = arg.as_region() else {
                    continue;
                };
                // Don't try to convert a late-bound region, which shouldn't exist anyways (yet).
                if opaque_region.is_bound() {
                    continue;
                }
                let opaque_region_vid = self.regioncx.to_region_vid(opaque_region);
                // Find a path between the borrow region and our opaque capture.
                if let Some(path) = self
                    .regioncx
                    .constraint_path_between_regions(self.borrow_region, opaque_region_vid)
                {
                    for constraint in path {
                        // If we find a call in this path, then check if it defines the opaque.
                        if let ConstraintCategory::CallArgument(Some(call_ty)) = constraint.category
                            && let ty::FnDef(call_def_id, _) = *call_ty.kind()
                            // This function defines the opaque :D
                            && call_def_id == parent
                            && let Locations::Single(location) = constraint.locations
                        {
                            return ControlFlow::Break((opaque.def_id, idx, location));
                        }
                    }
                }
            }
        }
        // No opaque found at this level; keep walking the type's components.
        ty.super_visit_with(self)
    }
}
// Visits an opaque's explicit bounds: breaks if the offending region is
// already mentioned explicitly (then there is nothing to point out), and
// otherwise collects every lifetime the bounds do mention, for use in the
// `+ use<...>` suggestion.
struct CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
    tcx: TyCtxt<'tcx>,
    generics: &'tcx ty::Generics,
    // Index of the overcaptured region in the opaque's generic args.
    offending_region_idx: usize,
    // Opaques already visited, guarding against cycles through bounds.
    seen_opaques: FxIndexSet<DefId>,
    // Lifetimes mentioned by the bounds, collected for the suggestion.
    seen_lifetimes: FxIndexSet<DefId>,
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
    // Break(()) means: the offending region is explicitly mentioned.
    type Result = ControlFlow<(), ()>;
    fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
        match *ty.kind() {
            ty::Alias(ty::Opaque, opaque) => {
                // Recurse into a nested opaque's bounds, but only the first
                // time we see it (`insert` returns false on repeats).
                if self.seen_opaques.insert(opaque.def_id) {
                    for (bound, _) in self
                        .tcx
                        .explicit_item_bounds(opaque.def_id)
                        .iter_instantiated_copied(self.tcx, opaque.args)
                    {
                        bound.visit_with(self)?;
                    }
                }
                ControlFlow::Continue(())
            }
            _ => ty.super_visit_with(self),
        }
    }
    fn visit_region(&mut self, r: ty::Region<'tcx>) -> Self::Result {
        match r.kind() {
            ty::ReEarlyParam(param) => {
                if param.index as usize == self.offending_region_idx {
                    // The bounds mention the overcaptured region explicitly.
                    ControlFlow::Break(())
                } else {
                    // Remember this lifetime for the `+ use<...>` suggestion.
                    self.seen_lifetimes.insert(self.generics.region_param(param, self.tcx).def_id);
                    ControlFlow::Continue(())
                }
            }
            _ => ControlFlow::Continue(()),
        }
    }
}
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from itertools import chain, count
import networkx as nx
from networkx.utils import make_str
__author__ = """Aric Hagberg (hagberg@lanl.gov))"""
__all__ = ['tree_data', 'tree_graph']
_attrs = dict(id='id', children='children')
def tree_data(G, root, attrs=_attrs):
    """Return data in a nested tree format suitable for JSON serialization
    and use in Javascript documents.

    Parameters
    ----------
    G : NetworkX graph
        Must be an oriented (directed) tree.
    root : node
        The root of the tree.
    attrs : dict
        Maps the keys 'id' and 'children' to the attribute names used in
        the serialized output. The two values must be distinct. Default
        value: :samp:`dict(id='id', children='children')`. Node data stored
        under either of these names may be silently dropped.

    Returns
    -------
    data : dict
        Nested dictionaries holding node data plus id and children entries.

    Raises
    ------
    TypeError
        If G is not a directed graph with exactly one more node than edges.
    NetworkXError
        If the attribute names in attrs are not unique.

    Examples
    --------
    >>> from networkx.readwrite import json_graph
    >>> G = nx.DiGraph([(1,2)])
    >>> data = json_graph.tree_data(G,root=1)

    To serialize with json

    >>> import json
    >>> s = json.dumps(data)

    Notes
    -----
    Node attribute keys must be strings if you want to serialize with JSON.
    Graph and edge attributes are not stored.
    The default value of attrs will be changed in a future release of NetworkX.

    See Also
    --------
    tree_graph, node_link_data, adjacency_data
    """
    # A tree has exactly one more node than it has edges.
    if G.number_of_nodes() != G.number_of_edges() + 1:
        raise TypeError("G is not a tree.")
    if not G.is_directed():
        raise TypeError("G is not directed.")
    id_ = attrs['id']
    children = attrs['children']
    if id_ == children:
        raise nx.NetworkXError('Attribute names are not unique.')

    def serialize(node):
        # Node data first, then the id key (which wins on collision).
        entry = dict(G.node[node].items())
        entry[id_] = node
        offspring = [serialize(child) for child in G[node]]
        # Interior nodes only carry a children key when it is non-empty.
        if offspring:
            entry[children] = offspring
        return entry

    # The root always carries a children entry, even when it is empty.
    tree = dict(G.node[root].items())
    tree[id_] = root
    tree[children] = [serialize(child) for child in G[root]]
    return tree
def tree_graph(data, attrs=_attrs):
    """Return a graph rebuilt from tree data format.

    Parameters
    ----------
    data : dict
        Tree formatted graph data, as produced by ``tree_data``.
    attrs : dict
        Maps the keys 'id' and 'children' to the attribute names used in
        the serialized input. Default value:
        :samp:`dict(id='id', children='children')`.

    Returns
    -------
    G : NetworkX DiGraph

    Examples
    --------
    >>> from networkx.readwrite import json_graph
    >>> G = nx.DiGraph([(1,2)])
    >>> data = json_graph.tree_data(G,root=1)
    >>> H = json_graph.tree_graph(data)

    Notes
    -----
    The default value of attrs will be changed in a future release of NetworkX.

    See Also
    --------
    tree_data, node_link_data, adjacency_data
    """
    id_ = attrs['id']
    children = attrs['children']
    graph = nx.DiGraph()

    def node_payload(entry):
        # Everything except the id/children bookkeeping keys, with
        # stringified attribute names.
        return dict((make_str(k), v) for k, v in entry.items()
                    if k != id_ and k != children)

    def attach(parent, entries):
        for entry in entries:
            child = entry[id_]
            graph.add_edge(parent, child)
            grandchildren = entry.get(children, [])
            if grandchildren:
                attach(child, grandchildren)
            graph.add_node(child, attr_dict=node_payload(entry))

    root = data[id_]
    graph.add_node(root, attr_dict=node_payload(data))
    attach(root, data.get(children, []))
    return graph
#! /usr/bin/env python
"""Runner Script for Robot Framework SeleniumLibrary Demo
Tests are run by giving a path to the tests to be executed as an argument to
this script. Possible Robot Framework options are given before the path.
Examples:
rundemo.py login_tests # Run all tests in a directory
rundemo.py login_tests/valid_login.text # Run tests in a specific file
rundemo.py --variable BROWSER:IE login_tests # Override variable
rundemo.py -v BROWSER:IE -v DELAY:0.25 login_tests
By default tests are executed with Firefox browser, but this can be changed
by overriding the `BROWSER` variable as illustrated above. Similarly it is
possible to slow down the test execution by overriding the `DELAY` variable
with a non-zero value.
When tests are run, the demo application is started and stopped automatically.
It is also possible to start and stop the application separately
by using `demoapp` options. This allows running tests with the
normal `pybot` start-up script, as well as investigating the demo application.
Running the demo requires that Robot Framework, Selenium2Library, Python, and
Java to be installed.
"""
import os
import sys
from tempfile import TemporaryFile
from subprocess import Popen, call, STDOUT
try:
import Selenium2Library
except ImportError, e:
print 'Importing Selenium2Library module failed (%s).' % e
print 'Please make sure you have Selenium2Library properly installed.'
print 'See INSTALL.rst for troubleshooting information.'
sys.exit(1)
ROOT = os.path.dirname(os.path.abspath(__file__))
DEMOAPP = os.path.join(ROOT, 'demoapp', 'server.py')
def run_tests(args):
    """Run Robot Framework (`pybot`) with the given arguments.

    The demo application is started before the tests and is always stopped
    afterwards, even if launching `pybot` raises an exception. Previously a
    failure in `call` would leave the demo server running.
    """
    start_demo_application()
    try:
        # `pybot` is a batch file on Windows, so a shell is required there.
        call(['pybot'] + args, shell=(os.sep == '\\'))
    finally:
        # Never leave the demo server behind.
        stop_demo_application()
def start_demo_application():
    """Start the demo server in the background, discarding its output."""
    Popen(['python', DEMOAPP, 'start'], stdout=TemporaryFile(), stderr=STDOUT)
def stop_demo_application():
    """Ask the demo server to shut down, discarding its output."""
    call(['python', DEMOAPP, 'stop'], stdout=TemporaryFile(), stderr=STDOUT)
def print_help():
    """Print the module docstring, which doubles as the help text."""
    print __doc__
def print_usage():
    """Print a short command-line usage summary."""
    print 'Usage: rundemo.py [options] datasource'
    print '   or: rundemo.py demoapp start|stop'
    print '   or: rundemo.py help'
if __name__ == '__main__':
    # Dispatch on the joined argument list: the special commands
    # `demoapp start`, `demoapp stop`, `help`, and no arguments at all map
    # to handlers; anything else is passed to Robot Framework as-is.
    action = {'demoapp-start': start_demo_application,
              'demoapp-stop': stop_demo_application,
              'help': print_help,
              '': print_usage}.get('-'.join(sys.argv[1:]))
    if action:
        action()
    else:
        run_tests(sys.argv[1:])
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account renumber wizard
# Copyright (C) 2009 Pexego Sistemas Informáticos.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import xmlrpclib
import logging
logger = logging.getLogger("create_lots_of_account_moves")
def create_lots_of_account_moves(dbname, user, passwd, howmany):
    """Small Odoo function that will create lots of account moves on the
    selected database, that can later be used for testing the renumber wizard.

    Note: The database must have demo data, and a fiscal year 2009 created.
    """
    url_template = "http://%s:%s/xmlrpc/%s"
    server = "localhost"
    port = 8069
    # Authenticate once against the `common` endpoint to get a numeric uid.
    login_facade = xmlrpclib.ServerProxy(
        url_template % (server, port, 'common'))
    user_id = login_facade.login(dbname, user, passwd)
    # The `object` endpoint is used for all subsequent model calls; note the
    # clear-text password is re-sent with every `execute` call (XML-RPC API).
    object_facade = xmlrpclib.ServerProxy(
        url_template % (server, port, 'object'))
    # NOTE(review): range(1, howmany) creates howmany - 1 moves, not howmany.
    for i in range(1, howmany):
        # Create one account move
        move_id = object_facade.execute(dbname, user_id, passwd,
                                        'account.move', 'create', {
                                            'ref': 'Test%s' % i,
                                            'type': 'journal_voucher',
                                            'journal_id': 5,
                                            'line_id': [
                                                # Balanced pair of lines:
                                                # 1000 credit on account 2,
                                                # 1000 debit on account 4.
                                                (0, 0, {
                                                    'analytic_account_id': False,
                                                    'currency_id': False,
                                                    'tax_amount': False,
                                                    'account_id': 2,
                                                    'partner_id': False,
                                                    'tax_code_id': False,
                                                    'credit': 1000.0,
                                                    'date_maturity': False,
                                                    'debit': False,
                                                    'amount_currency': False,
                                                    'ref': False,
                                                    'name': 'Test_l1'
                                                }),
                                                (0, 0, {
                                                    'analytic_account_id': False,
                                                    'currency_id': False,
                                                    'tax_amount': False,
                                                    'account_id': 4,
                                                    'partner_id': False,
                                                    'tax_code_id': False,
                                                    'credit': False,
                                                    'date_maturity': False,
                                                    'debit': 1000.0,
                                                    'amount_currency': False,
                                                    'ref': False,
                                                    'name': 'Test_l2'})
                                            ],
                                            'period_id': 1,
                                            # NOTE(review): day is not
                                            # zero-padded (e.g. '2009-01-5');
                                            # confirm the server accepts it.
                                            'date': '2009-01-%s' % (
                                                (i % 31) or 1
                                            ),
                                            'partner_id': False,
                                            'to_check': 0
                                        },
                                        {})
        # Validate the move
        object_facade.execute(dbname, user_id, passwd,
                              u'account.move', 'button_validate',
                              [move_id], {})
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
if __name__ == "__main__":
    # Expect exactly four positional arguments; otherwise log usage and exit.
    if len(sys.argv) < 5:
        logger.info(u"Usage: %s <dbname> <user> <password> <howmany>" %
                    sys.argv[0])
    else:
        create_lots_of_account_moves(
            sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from ..shared.function_definition import FunctionDefinition
__all__ = ["ChatCompletionFunctionTool"]
class ChatCompletionFunctionTool(BaseModel):
    """A function tool that can be used to generate a response."""
    # The function's definition (name, description, parameter schema).
    function: FunctionDefinition
    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/renesas,ceu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas Capture Engine Unit (CEU)
maintainers:
- Jacopo Mondi <jacopo+renesas@jmondi.org>
- linux-renesas-soc@vger.kernel.org
description: |+
The Capture Engine Unit is the image capture interface found in the Renesas SH
Mobile, R-Mobile and RZ SoCs. The interface supports a single parallel input
with data bus width of 8 or 16 bits.
properties:
compatible:
enum:
- renesas,r7s72100-ceu
- renesas,r8a7740-ceu
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
power-domains:
maxItems: 1
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
properties:
endpoint:
$ref: video-interfaces.yaml#
unevaluatedProperties: false
properties:
hsync-active: true
vsync-active: true
field-even-active: false
bus-width:
enum: [8, 16]
default: 8
required:
- compatible
- reg
- interrupts
- clocks
- power-domains
- port
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/r7s72100-clock.h>
ceu: ceu@e8210000 {
reg = <0xe8210000 0x209c>;
compatible = "renesas,r7s72100-ceu";
interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp6_clks R7S72100_CLK_CEU>;
power-domains = <&cpg_clocks>;
port {
ceu_in: endpoint {
remote-endpoint = <&ov7670_out>;
hsync-active = <1>;
vsync-active = <0>;
};
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/media/renesas,ceu.yaml |
"""View for block details specialized for a GASSupplierOrder"""
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.http import HttpResponse
from django.db import transaction
from flexi_auth.models import ObjectWithContext
from gasistafelice.lib.shortcuts import render_to_response, render_to_xml_response, render_to_context_response
from gasistafelice.rest.views.blocks import details
from gasistafelice.gas.forms import cash as order_cash_forms
from gasistafelice.consts import CASH, VIEW, EDIT_MULTIPLE, INCOME
from gasistafelice.rest.views.blocks.base import ResourceBlockAction
from gasistafelice.rest.views.blocks import AbstractBlock
from gasistafelice.gas.forms.cash import InvoiceOrderForm
from django.conf import settings
import logging
log = logging.getLogger(__name__)
class Block(AbstractBlock):
    """Order-invoice block: renders and processes the "actual total
    registration" (invoice income) form for a GAS supplier order."""
    BLOCK_NAME = "order_invoice"
    BLOCK_VALID_RESOURCE_TYPES = ["order"]
    def __init__(self):
        super(Block, self).__init__()
        self.description = _("Actual total registration")
    def _get_user_actions(self, request):
        """Return the actions available to this user on this order:
        the INCOME (register) action, only for the GAS cashier or the
        order referrer, and only while the order is closed or unpaid."""
        user_actions = []
        order = self.resource.order
        if request.user.has_perm(CASH, obj=ObjectWithContext(order.gas)) or \
            request.user == order.referrer_person.user:
            if order.is_closed() or order.is_unpaid():
                user_actions += [
                    ResourceBlockAction(
                        block_name = self.BLOCK_NAME,
                        resource = self.resource,
                        name=INCOME, verbose_name=_("Register"),
                        popup_form=False,
                    ),
                ]
        return user_actions
    def get_response(self, request, resource_type, resource_id, args):
        """Render the invoice form, and on a POST with args == "INCOME"
        save the invoice registration inside a transaction."""
        super(Block, self).get_response(request, resource_type, resource_id, args)
        res = self.resource
        user_actions = self._get_user_actions(request)
        # NOTE(review): when args != "INCOME", `form` is never bound and the
        # ctx below raises NameError — confirm callers always pass "INCOME".
        if args == "INCOME":
            if request.method == 'POST':
                form = InvoiceOrderForm(request, request.POST)
                if form.is_valid():
                    with transaction.commit_on_success():
                        if form.cleaned_data:
                            try:
                                form.save()
                                # return self.response_success()
                            except Exception, e:
                                # In debug mode surface the error; otherwise
                                # attach it to the form's "amount" field.
                                if settings.FORM_DEBUG:
                                    raise
                                else:
                                    msg = _("Error in invoice registration: ") + e.message
                                    form._errors["amount"] = form.error_class([msg])
            else:
                form = InvoiceOrderForm(request)
        ctx = {
            'resource' : res,
            'sanet_urn' : "%s/%s" % (resource_type, resource_id),
            'form' : form,
            'user_actions' : user_actions,
        }
        return render_to_xml_response('blocks/order_invoice.xml', ctx)
# coding=utf-8
"""
Collect the elasticsearch stats for the local node.
Supports multiple instances. When using the 'instances'
parameter the instance alias will be appended to the
'path' parameter.
#### Dependencies
* urlib2
"""
import urllib2
import re
from diamond.collector import str_to_bool
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
RE_LOGSTASH_INDEX = re.compile('^(.*)-\d\d\d\d\.\d\d\.\d\d$')
class ElasticSearchCollector(diamond.collector.Collector):
def process_config(self):
super(ElasticSearchCollector, self).process_config()
instance_list = self.config['instances']
if isinstance(instance_list, basestring):
instance_list = [instance_list]
if len(instance_list) == 0:
host = self.config['host']
port = self.config['port']
# use empty alias to identify single-instance config
# omitting the use of the alias in the metrics path
instance_list.append('@%s:%s' % (host, port))
self.instances = {}
for instance in instance_list:
if '@' in instance:
(alias, hostport) = instance.split('@', 1)
else:
alias = 'default'
hostport = instance
if ':' in hostport:
host, port = hostport.split(':', 1)
else:
host = hostport
port = 9200
self.instances[alias] = (host, int(port))
def get_default_config_help(self):
config_help = super(ElasticSearchCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'instances': "List of instances. When set this overrides "
"the 'host' and 'port' settings. Instance format: "
"instance [<alias>@]<hostname>[:<port>]",
'stats': "Available stats: \n"
+ " - jvm (JVM information) \n"
+ " - thread_pool (Thread pool information) \n"
+ " - indices (Individual index stats)\n",
'logstash_mode': "If 'indices' stats are gathered, remove "
+ "the YYYY.MM.DD suffix from the index name "
+ "(e.g. logstash-adm-syslog-2014.01.03) and use that "
+ "as a bucket for all 'day' index stats.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ElasticSearchCollector, self).get_default_config()
config.update({
'host': '127.0.0.1',
'port': 9200,
'instances': [],
'path': 'elasticsearch',
'stats': ['jvm', 'thread_pool', 'indices'],
'logstash_mode': False,
'cluster': False,
})
return config
def _get(self, host, port, path, assert_key=None):
"""
Execute a ES API call. Convert response into JSON and
optionally assert its structure.
"""
url = 'http://%s:%i/%s' % (host, port, path)
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error("%s: %s", url, err)
return False
try:
doc = json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a"
+ " json object")
return False
if assert_key and assert_key not in doc:
self.log.error("Bad response from elasticsearch, expected key "
"'%s' was missing for %s" % (assert_key, url))
return False
return doc
def _copy_one_level(self, metrics, prefix, data, filter=lambda key: True):
for key, value in data.iteritems():
if filter(key):
metric_path = '%s.%s' % (prefix, key)
self._set_or_sum_metric(metrics, metric_path, value)
def _copy_two_level(self, metrics, prefix, data, filter=lambda key: True):
for key1, d1 in data.iteritems():
self._copy_one_level(metrics, '%s.%s' % (prefix, key1), d1, filter)
def _index_metrics(self, metrics, prefix, index):
if self.config['logstash_mode']:
"""Remove the YYYY.MM.DD bit from logstash indices.
This way we keep using the same metric naming and not polute
our metrics system (e.g. Graphite) with new metrics every day."""
m = RE_LOGSTASH_INDEX.match(prefix)
if m:
prefix = m.group(1)
# keep a telly of the number of indexes
self._set_or_sum_metric(metrics,
'%s.indexes_in_group' % prefix, 1)
self._add_metric(metrics, '%s.docs.count' % prefix, index,
['docs', 'count'])
self._add_metric(metrics, '%s.docs.deleted' % prefix, index,
['docs', 'deleted'])
self._add_metric(metrics, '%s.datastore.size' % prefix, index,
['store', 'size_in_bytes'])
# publish all 'total' and 'time_in_millis' stats
self._copy_two_level(
metrics, prefix, index,
lambda key: key.endswith('total') or key.endswith('time_in_millis'))
def _add_metric(self, metrics, metric_path, data, data_path):
"""If the path specified by data_path (a list) exists in data,
add to metrics. Use when the data path may not be present"""
current_item = data
for path_element in data_path:
current_item = current_item.get(path_element)
if current_item is None:
return
self._set_or_sum_metric(metrics, metric_path, current_item)
def _set_or_sum_metric(self, metrics, metric_path, value):
"""If we already have a datapoint for this metric, lets add
the value. This is used when the logstash mode is enabled."""
if metric_path in metrics:
metrics[metric_path] += value
else:
metrics[metric_path] = value
def collect_instance_cluster_stats(self, host, port, metrics):
result = self._get(host, port, '_cluster/health')
if not result:
return
self._add_metric(metrics, 'cluster_health.nodes.total',
result, ['number_of_nodes'])
self._add_metric(metrics, 'cluster_health.nodes.data',
result, ['number_of_data_nodes'])
self._add_metric(metrics, 'cluster_health.shards.active_primary',
result, ['active_primary_shards'])
self._add_metric(metrics, 'cluster_health.shards.active',
result, ['active_shards'])
self._add_metric(metrics, 'cluster_health.shards.relocating',
result, ['relocating_shards'])
self._add_metric(metrics, 'cluster_health.shards.unassigned',
result, ['unassigned_shards'])
self._add_metric(metrics, 'cluster_health.shards.initializing',
result, ['initializing_shards'])
def collect_instance_index_stats(self, host, port, metrics):
result = self._get(host, port,
'_stats?clear=true&docs=true&store=true&'
+ 'indexing=true&get=true&search=true', '_all')
if not result:
return
_all = result['_all']
self._index_metrics(metrics, 'indices._all', _all['primaries'])
if 'indices' in _all:
indices = _all['indices']
elif 'indices' in result: # elasticsearch >= 0.90RC2
indices = result['indices']
else:
return
for name, index in indices.iteritems():
self._index_metrics(metrics, 'indices.%s' % name,
index['primaries'])
def collect_instance(self, alias, host, port):
result = self._get(host, port, '_nodes/_local/stats?all=true', 'nodes')
if not result:
return
metrics = {}
node = result['nodes'].keys()[0]
data = result['nodes'][node]
#
# http connections to ES
metrics['http.current'] = data['http']['current_open']
#
# indices
indices = data['indices']
metrics['indices.docs.count'] = indices['docs']['count']
metrics['indices.docs.deleted'] = indices['docs']['deleted']
metrics['indices.datastore.size'] = indices['store']['size_in_bytes']
transport = data['transport']
metrics['transport.rx.count'] = transport['rx_count']
metrics['transport.rx.size'] = transport['rx_size_in_bytes']
metrics['transport.tx.count'] = transport['tx_count']
metrics['transport.tx.size'] = transport['tx_size_in_bytes']
# elasticsearch < 0.90RC2
if 'cache' in indices:
cache = indices['cache']
self._add_metric(metrics, 'cache.bloom.size', cache,
['bloom_size_in_bytes'])
self._add_metric(metrics, 'cache.field.evictions', cache,
['field_evictions'])
self._add_metric(metrics, 'cache.field.size', cache,
['field_size_in_bytes'])
metrics['cache.filter.count'] = cache['filter_count']
metrics['cache.filter.evictions'] = cache['filter_evictions']
metrics['cache.filter.size'] = cache['filter_size_in_bytes']
self._add_metric(metrics, 'cache.id.size', cache,
['id_cache_size_in_bytes'])
# elasticsearch >= 0.90RC2
if 'filter_cache' in indices:
cache = indices['filter_cache']
metrics['cache.filter.evictions'] = cache['evictions']
metrics['cache.filter.size'] = cache['memory_size_in_bytes']
self._add_metric(metrics, 'cache.filter.count', cache, ['count'])
# elasticsearch >= 0.90RC2
if 'id_cache' in indices:
cache = indices['id_cache']
self._add_metric(metrics, 'cache.id.size', cache,
['memory_size_in_bytes'])
# elasticsearch >= 0.90
if 'fielddata' in indices:
fielddata = indices['fielddata']
self._add_metric(metrics, 'fielddata.size', fielddata,
['memory_size_in_bytes'])
self._add_metric(metrics, 'fielddata.evictions', fielddata,
['evictions'])
#
# process mem/cpu (may not be present, depending on access restrictions)
self._add_metric(metrics, 'process.cpu.percent', data,
['process', 'cpu', 'percent'])
self._add_metric(metrics, 'process.mem.resident', data,
['process', 'mem', 'resident_in_bytes'])
self._add_metric(metrics, 'process.mem.share', data,
['process', 'mem', 'share_in_bytes'])
self._add_metric(metrics, 'process.mem.virtual', data,
['process', 'mem', 'total_virtual_in_bytes'])
#
# filesystem (may not be present, depending on access restrictions)
if 'fs' in data and 'data' in data['fs'] and data['fs']['data']:
fs_data = data['fs']['data'][0]
self._add_metric(metrics, 'disk.reads.count', fs_data,
['disk_reads'])
self._add_metric(metrics, 'disk.reads.size', fs_data,
['disk_read_size_in_bytes'])
self._add_metric(metrics, 'disk.writes.count', fs_data,
['disk_writes'])
self._add_metric(metrics, 'disk.writes.size', fs_data,
['disk_write_size_in_bytes'])
#
# jvm
if 'jvm' in self.config['stats']:
jvm = data['jvm']
mem = jvm['mem']
for k in ('heap_used', 'heap_committed', 'non_heap_used',
'non_heap_committed'):
metrics['jvm.mem.%s' % k] = mem['%s_in_bytes' % k]
if 'heap_used_percent' in mem:
metrics['jvm.mem.heap_used_percent'] = mem['heap_used_percent']
for pool, d in mem['pools'].iteritems():
pool = pool.replace(' ', '_')
metrics['jvm.mem.pools.%s.used' % pool] = d['used_in_bytes']
metrics['jvm.mem.pools.%s.max' % pool] = d['max_in_bytes']
metrics['jvm.threads.count'] = jvm['threads']['count']
gc = jvm['gc']
collection_count = 0
collection_time_in_millis = 0
for collector, d in gc['collectors'].iteritems():
metrics['jvm.gc.collection.%s.count' % collector] = d[
'collection_count']
collection_count += d['collection_count']
metrics['jvm.gc.collection.%s.time' % collector] = d[
'collection_time_in_millis']
collection_time_in_millis += d['collection_time_in_millis']
# calculate the totals, as they're absent in elasticsearch > 0.90.10
if 'collection_count' in gc:
metrics['jvm.gc.collection.count'] = gc['collection_count']
else:
metrics['jvm.gc.collection.count'] = collection_count
k = 'collection_time_in_millis'
if k in gc:
metrics['jvm.gc.collection.time'] = gc[k]
else:
metrics['jvm.gc.collection.time'] = collection_time_in_millis
#
# thread_pool
if 'thread_pool' in self.config['stats']:
self._copy_two_level(metrics, 'thread_pool', data['thread_pool'])
#
# network
self._copy_two_level(metrics, 'network', data['network'])
#
# cluster (optional)
if str_to_bool(self.config['cluster']):
self.collect_instance_cluster_stats(host, port, metrics)
#
# indices (optional)
if 'indices' in self.config['stats']:
self.collect_instance_index_stats(host, port, metrics)
#
# all done, now publishing all metrics
for key in metrics:
full_key = key
if alias != '':
full_key = '%s.%s' % (alias, full_key)
self.publish(full_key, metrics[key])
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
for alias in sorted(self.instances):
(host, port) = self.instances[alias]
self.collect_instance(alias, host, port) | unknown | codeparrot/codeparrot-clean | ||
it("should hide stack in details", function() {
expect(function f() {
require("./loader!");
}).toThrow();
}); | javascript | github | https://github.com/webpack/webpack | test/cases/compile/error-hide-stack/index.js |
# Copyright 2014 Arista Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import threading
from oslo.config import cfg
from oslo.utils import excutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.common import constants as q_const
from neutron.common import log
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron import context as nctx
from neutron.db import db_base_plugin_v2
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.plugins.ml2.driver_context import NetworkContext # noqa
from neutron.plugins.ml2.drivers.arista import arista_l3_driver
LOG = logging.getLogger(__name__)
class AristaL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin):
"""Implements L3 Router service plugin for Arista hardware.
Creates routers in Arista hardware, manages them, adds/deletes interfaces
to the routes.
"""
supported_extension_aliases = ["router", "ext-gw-mode",
"extraroute"]
    def __init__(self, driver=None):
        """Initialize the plugin and start the periodic EOS sync thread.

        :param driver: optional pre-built L3 driver; defaults to a new
            AristaL3Driver (mainly overridden in tests).
        """
        self.driver = driver or arista_l3_driver.AristaL3Driver()
        # Helper for reading Neutron networks/subnets/ports during sync.
        self.ndb = arista_l3_driver.NeutronNets()
        self.setup_rpc()
        # Interval (seconds) between Neutron-DB <-> EOS synchronizations.
        self.sync_timeout = cfg.CONF.l3_arista.l3_sync_interval
        self.sync_lock = threading.Lock()
        # Kicks off the first sync immediately and schedules the next one.
        self._synchronization_thread()
    def setup_rpc(self):
        """Wire up the L3 plugin RPC topic, notifier and callback endpoints."""
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = q_rpc.create_connection(new=True)
        # Notifier used to push router updates to L3 agents.
        self.agent_notifiers.update(
            {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [l3_rpc.L3RpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Start consuming RPC messages in background threads.
        self.conn.consume_in_threads()
    def get_plugin_type(self):
        """Return the service-plugin type constant (L3 router/NAT)."""
        return constants.L3_ROUTER_NAT
    def get_plugin_description(self):
        """Returns string description of the plugin."""
        return ("Arista L3 Router Service Plugin for Arista Hardware "
                "based routing")
    def _synchronization_thread(self):
        """Run one sync pass and re-schedule itself after sync_timeout."""
        with self.sync_lock:
            self.synchronize()
        # One-shot timer; each pass schedules the next, forming a loop.
        self.timer = threading.Timer(self.sync_timeout,
                                     self._synchronization_thread)
        self.timer.start()
    def stop_synchronization_thread(self):
        """Cancel the pending sync timer, if any (idempotent)."""
        if self.timer:
            self.timer.cancel()
            self.timer = None
    @log.log
    def create_router(self, context, router):
        """Create a router in the Neutron DB, then mirror it to Arista HW.

        If hardware programming fails, the DB row is rolled back (deleted)
        and the original exception is re-raised to the caller.
        """
        tenant_id = self._get_tenant_id_for_create(context, router['router'])
        # Add router to the DB
        with context.session.begin(subtransactions=True):
            new_router = super(AristaL3ServicePlugin, self).create_router(
                context,
                router)
        # create router on the Arista Hw
        try:
            self.driver.create_router(context, tenant_id, new_router)
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error creating router on Arista HW router=%s "),
                          new_router)
                # Undo the DB create so Neutron and EOS stay consistent.
                super(AristaL3ServicePlugin, self).delete_router(context,
                                                                 new_router['id'])
    @log.log
    def update_router(self, context, router_id, router):
        """Update an existing router in DB, and update it in Arista HW."""
        with context.session.begin(subtransactions=True):
            # Read existing router record from DB
            original_router = super(AristaL3ServicePlugin, self).get_router(
                context, router_id)
            # Update router DB
            new_router = super(AristaL3ServicePlugin, self).update_router(
                context, router_id, router)
            # Modify router on the Arista Hw
            try:
                self.driver.update_router(context, router_id,
                                          original_router, new_router)
                return new_router
            except Exception:
                # NOTE(review): on a driver failure this logs and falls
                # through, implicitly returning None while the DB change is
                # kept -- confirm callers tolerate a None return here.
                LOG.error(_LE("Error updating router on Arista HW router=%s "),
                          new_router)
    @log.log
    def delete_router(self, context, router_id):
        """Delete an existing router from Arista HW as well as from the DB.

        Hardware deletion is best-effort: failures are logged and the DB
        row is removed regardless.
        """
        router = super(AristaL3ServicePlugin, self).get_router(context,
                                                               router_id)
        tenant_id = router['tenant_id']
        # Delete router on the Arista Hw
        try:
            self.driver.delete_router(context, tenant_id, router_id, router)
        except Exception as e:
            LOG.error(_LE("Error deleting router on Arista HW "
                          "router %(r)s exception=%(e)s"),
                      {'r': router, 'e': e})
        with context.session.begin(subtransactions=True):
            super(AristaL3ServicePlugin, self).delete_router(context,
                                                             router_id)
    @log.log
    def add_router_interface(self, context, router_id, interface_info):
        """Add a subnet of a network to an existing router.

        The interface is added to the Neutron DB first; an SVI is then
        created on the Arista hardware.  If HW programming fails, the DB
        interface is removed again and the exception re-raised.
        """
        new_router = super(AristaL3ServicePlugin, self).add_router_interface(
            context, router_id, interface_info)
        # Get network info for the subnet that is being added to the router.
        # Check if the interface information is by port-id or subnet-id
        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
        # NOTE(review): if neither port_id nor subnet_id is supplied,
        # ``subnet`` is unbound below and this raises NameError -- the base
        # add_router_interface presumably rejects that case first; verify.
        if add_by_sub:
            subnet = self.get_subnet(context, interface_info['subnet_id'])
        elif add_by_port:
            port = self.get_port(context, interface_info['port_id'])
            subnet_id = port['fixed_ips'][0]['subnet_id']
            subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        # To create SVI's in Arista HW, the segmentation Id is required
        # for this network.
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']
        # Package all the info needed for Hw programming
        router = super(AristaL3ServicePlugin, self).get_router(context,
                                                               router_id)
        router_info = copy.deepcopy(new_router)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']
        router_info['cidr'] = subnet['cidr']
        router_info['gip'] = subnet['gateway_ip']
        router_info['ip_version'] = subnet['ip_version']
        try:
            self.driver.add_router_interface(context, router_info)
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error Adding subnet %(subnet)s to "
                              "router %(router_id)s on Arista HW"),
                          {'subnet': subnet, 'router_id': router_id})
                # Roll back the DB-side interface so DB and HW agree.
                super(AristaL3ServicePlugin, self).remove_router_interface(
                    context,
                    router_id,
                    interface_info)
@log.log
def remove_router_interface(self, context, router_id, interface_info):
"""Remove a subnet of a network from an existing router."""
new_router = (
super(AristaL3ServicePlugin, self).remove_router_interface(
context, router_id, interface_info))
# Get network information of the subnet that is being removed
subnet = self.get_subnet(context, new_router['subnet_id'])
network_id = subnet['network_id']
# For SVI removal from Arista HW, segmentation ID is needed
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router = super(AristaL3ServicePlugin, self).get_router(context,
router_id)
router_info = copy.deepcopy(new_router)
router_info['seg_id'] = seg_id
router_info['name'] = router['name']
try:
self.driver.remove_router_interface(context, router_info)
return new_router
except Exception as exc:
LOG.error(_LE("Error removing interface %(interface)s from "
"router %(router_id)s on Arista HW"
"Exception =(exc)s"),
{'interface': interface_info, 'router_id': router_id,
'exc': exc})
    def synchronize(self):
        """Synchronizes Router DB from Neutron DB with EOS.
        Walks through the Neutron DB and ensures that all the routers
        created in Neutron DB match with EOS. After creating appropriate
        routers, it ensures to add interfaces as well.
        Uses idempotent properties of EOS configuration, which means
        same commands can be repeated.
        """
        LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
        ctx = nctx.get_admin_context()
        routers = super(AristaL3ServicePlugin, self).get_routers(ctx)
        for r in routers:
            tenant_id = r['tenant_id']
            ports = self.ndb.get_all_ports_for_tenant(tenant_id)
            # NOTE(review): the driver is handed ``self`` here where the
            # CRUD methods pass a request context -- confirm the driver
            # does not dereference the context argument.
            try:
                self.driver.create_router(self, tenant_id, r)
            except Exception:
                # Best-effort sync: skip routers EOS refuses and move on.
                continue
            # Figure out which interfaces are added to this router
            for p in ports:
                if p['device_id'] == r['id']:
                    net_id = p['network_id']
                    subnet_id = p['fixed_ips'][0]['subnet_id']
                    subnet = self.ndb.get_subnet_info(subnet_id)
                    ml2_db = NetworkContext(self, ctx, {'id': net_id})
                    seg_id = ml2_db.network_segments[0]['segmentation_id']
                    r['seg_id'] = seg_id
                    r['cidr'] = subnet['cidr']
                    r['gip'] = subnet['gateway_ip']
                    r['ip_version'] = subnet['ip_version']
                    try:
                        self.driver.add_router_interface(self, r)
                    except Exception:
                        LOG.error(_LE("Error Adding interface %(subnet_id)s "
                                      "to router %(router_id)s on Arista HW"),
                                  {'subnet_id': subnet_id, 'router_id': r})
def _validate_interface_info(self, interface_info):
port_id_specified = interface_info and 'port_id' in interface_info
subnet_id_specified = interface_info and 'subnet_id' in interface_info
return port_id_specified, subnet_id_specified | unknown | codeparrot/codeparrot-clean | ||
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
__all__ = ['html_annotate', 'htmldiff']
try:
from html import escape as html_escape
except ImportError:
from cgi import escape as html_escape
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
try:
basestring
except NameError:
# Python 3
basestring = str
############################################################
## Annotation
############################################################
def default_markup(text, version):
    """Default annotation renderer: wrap *text* in a <span> whose title
    attribute is the (escaped) version label."""
    title = html_escape(_unicode(version), 1)
    return '<span title="%s">%s</span>' % (title, text)
def html_annotate(doclist, markup=default_markup):
    """
    doclist should be ordered from oldest to newest, like::
        >>> version1 = 'Hello World'
        >>> version2 = 'Goodbye World'
        >>> print(html_annotate([(version1, 'version 1'),
        ...                      (version2, 'version 2')]))
        <span title="version 2">Goodbye</span> <span title="version 1">World</span>
    The documents must be *fragments* (str/UTF8 or unicode), not
    complete documents
    The markup argument is a function to markup the spans of words.
    This function is called like markup('Hello', 'version 2'), and
    returns HTML.  The first argument is text and never includes any
    markup.  The default uses a span with a title:
        >>> print(default_markup('Some Text', 'by Joe'))
        <span title="by Joe">Some Text</span>
    """
    # The basic strategy we have is to split the documents up into
    # logical tokens (which are words with attached markup).  We then
    # do diffs of each of the versions to track when a token first
    # appeared in the document; the annotation attached to the token
    # is the version where it first appeared.
    tokenlist = [tokenize_annotated(doc, version)
                 for doc, version in doclist]
    # Fold annotations forward pairwise, oldest to newest; tokens that
    # survive a diff keep their original (older) version label.
    cur_tokens = tokenlist[0]
    for tokens in tokenlist[1:]:
        html_annotate_merge_annotations(cur_tokens, tokens)
        cur_tokens = tokens
    # After we've tracked all the tokens, we can combine spans of text
    # that are adjacent and have the same annotation
    cur_tokens = compress_tokens(cur_tokens)
    # And finally add markup
    result = markup_serialize_tokens(cur_tokens, markup)
    return ''.join(result).strip()
def tokenize_annotated(doc, annotation):
    """Tokenize *doc* and stamp every resulting token with *annotation*."""
    annotated = tokenize(doc, include_hrefs=False)
    for each in annotated:
        each.annotation = annotation
    return annotated
def html_annotate_merge_annotations(tokens_old, tokens_new):
    """Merge the annotations from tokens_old into tokens_new, when the
    tokens in the new document already existed in the old document.
    """
    # Diff the two token streams; only 'equal' runs carry annotations over.
    s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
    commands = s.get_opcodes()
    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            # Matching spans: the new tokens inherit the older labels.
            eq_old = tokens_old[i1:i2]
            eq_new = tokens_new[j1:j2]
            copy_annotations(eq_old, eq_new)
def copy_annotations(src, dest):
    """Transfer the .annotation attribute from each token in *src* onto
    the token at the same position in *dest* (lists must match in length).
    """
    assert len(src) == len(dest)
    for position, src_tok in enumerate(src):
        dest[position].annotation = src_tok.annotation
def compress_tokens(tokens):
    """Collapse runs of adjacent tokens that share one annotation and have
    no markup between them into single, wider tokens.
    """
    compressed = [tokens[0]]
    for tok in tokens[1:]:
        prev = compressed[-1]
        joinable = (not prev.post_tags and not tok.pre_tags
                    and prev.annotation == tok.annotation)
        if joinable:
            compress_merge_back(compressed, tok)
        else:
            compressed.append(tok)
    return compressed
def compress_merge_back(tokens, tok):
    """ Merge tok into the last element of tokens (modifying the list of
    tokens in-place). """
    last = tokens[-1]
    # Only plain ``token`` instances merge; subclasses (tag_token,
    # href_token) carry extra state and must stay distinct.
    if type(last) is not token or type(tok) is not token:
        tokens.append(tok)
    else:
        # Concatenate text, keeping the whitespace that sat between the
        # two words, and take pre-tags from the left / post-tags from the
        # right so surrounding markup is preserved.
        text = _unicode(last)
        if last.trailing_whitespace:
            text += last.trailing_whitespace
        text += tok
        merged = token(text,
                       pre_tags=last.pre_tags,
                       post_tags=tok.post_tags,
                       trailing_whitespace=tok.trailing_whitespace)
        # Both tokens share one annotation (checked by the caller).
        merged.annotation = last.annotation
        tokens[-1] = merged
def markup_serialize_tokens(tokens, markup_func):
    """Serialize *tokens* to a stream of HTML text chunks, passing each
    token's visible text through markup_func(text, annotation).
    """
    for tok in tokens:
        for pre_tag in tok.pre_tags:
            yield pre_tag
        chunk = markup_func(tok.html(), tok.annotation)
        if tok.trailing_whitespace:
            chunk += tok.trailing_whitespace
        yield chunk
        for post_tag in tok.post_tags:
            yield post_tag
############################################################
## HTML Diffs
############################################################
def htmldiff(old_html, new_html):
    ## FIXME: this should take parsed documents too, and use their body
    ## or other content.
    """ Do a diff of the old and new document.  The documents are HTML
    *fragments* (str/UTF8 or unicode), they are not complete documents
    (i.e., no <html> tag).
    Returns HTML with <ins> and <del> tags added around the
    appropriate text.
    Markup is generally ignored, with the markup from new_html
    preserved, and possibly some markup from old_html (though it is
    considered acceptable to lose some of the old markup).  Only the
    words in the HTML are diffed.  The exception is <img> tags, which
    are treated like words, and the href attribute of <a> tags, which
    are noted inside the tag itself when there are changes.
    """
    # Tokenize both fragments, diff at the token level, then re-validate
    # the markup (<ins>/<del> moved inside block elements).
    old_html_tokens = tokenize(old_html)
    new_html_tokens = tokenize(new_html)
    result = htmldiff_tokens(old_html_tokens, new_html_tokens)
    result = ''.join(result).strip()
    return fixup_ins_del_tags(result)
def htmldiff_tokens(html1_tokens, html2_tokens):
    """ Does a diff on the tokens themselves, returning a list of text
    chunks (not tokens).
    """
    # There are several passes as we do the differences.  The tokens
    # isolate the portion of the content we care to diff; difflib does
    # all the actual hard work at that point.
    #
    # Then we must create a valid document from pieces of both the old
    # document and the new document.  We generally prefer to take
    # markup from the new document, and only do a best effort attempt
    # to keep markup from the old document; anything that we can't
    # resolve we throw away.  Also we try to put the deletes as close
    # to the location where we think they would have been -- because
    # we are only keeping the markup from the new document, it can be
    # fuzzy where in the new document the old text would have gone.
    # Again we just do a best effort attempt.
    s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
    commands = s.get_opcodes()
    result = []
    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            # Unchanged text comes from the NEW document's tokens.
            result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
            continue
        # A 'replace' opcode is handled as an insert followed by a delete.
        if command == 'insert' or command == 'replace':
            ins_tokens = expand_tokens(html2_tokens[j1:j2])
            merge_insert(ins_tokens, result)
        if command == 'delete' or command == 'replace':
            del_tokens = expand_tokens(html1_tokens[i1:i2])
            merge_delete(del_tokens, result)
    # If deletes were inserted directly as <del> then we'd have an
    # invalid document at this point.  Instead we put in special
    # markers, and when the complete diffed document has been created
    # we try to move the deletes around and resolve any problems.
    result = cleanup_delete(result)
    return result
def expand_tokens(tokens, equal=False):
    """Flatten *tokens* into their text chunks: attached pre-tags, the
    visible text (plus any trailing whitespace), then attached post-tags.
    When *equal* is true, tokens flagged hide_when_equal (hrefs) emit no
    visible text.
    """
    for tok in tokens:
        for chunk in tok.pre_tags:
            yield chunk
        if not (equal and tok.hide_when_equal):
            text = tok.html()
            if tok.trailing_whitespace:
                text += tok.trailing_whitespace
            yield text
        for chunk in tok.post_tags:
            yield chunk
def merge_insert(ins_chunks, doc):
    """ doc is the already-handled document (as a list of text chunks);
    here we add <ins>ins_chunks</ins> to the end of that. """
    # Though we don't throw away unbalanced_start or unbalanced_end
    # (we assume there is accompanying markup later or earlier in the
    # document), we only put <ins> around the balanced portion.
    unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
    doc.extend(unbalanced_start)
    if doc and not doc[-1].endswith(' '):
        # Fix up the case where the word before the insert didn't end with
        # a space
        doc[-1] += ' '
    doc.append('<ins>')
    if balanced and balanced[-1].endswith(' '):
        # We move space outside of </ins>
        balanced[-1] = balanced[-1][:-1]
    doc.extend(balanced)
    # The space after </ins> keeps the inserted run separated from what follows.
    doc.append('</ins> ')
    doc.extend(unbalanced_end)
# These are sentinels to represent the start and end of a <del>
# segment, until we do the cleanup phase to turn them into proper
# markup:
class DEL_START:
    # Marker object: a pending deletion begins here in the chunk stream.
    pass
class DEL_END:
    # Marker object: the matching pending deletion ends here.
    pass
class NoDeletes(Exception):
    """ Raised when the document no longer contains any pending deletes
    (DEL_START/DEL_END) """
def merge_delete(del_chunks, doc):
    """Append *del_chunks* to *doc*, bracketed by the DEL_START/DEL_END
    sentinels; cleanup_delete() later rewrites these into real <del> tags.
    """
    doc.append(DEL_START)
    for chunk in del_chunks:
        doc.append(chunk)
    doc.append(DEL_END)
def cleanup_delete(chunks):
    """ Cleans up any DEL_START/DEL_END markers in the document, replacing
    them with <del></del>.  To do this while keeping the document
    valid, it may need to drop some tags (either start or end tags).
    It may also move the del into adjacent tags to try to move it to a
    similar location where it was originally located (e.g., moving a
    delete into preceding <div> tag, if the del looks like (DEL_START,
    'Text</div>', DEL_END)"""
    while 1:
        # Find a pending DEL_START/DEL_END, splitting the document
        # into stuff-preceding-DEL_START, stuff-inside, and
        # stuff-following-DEL_END
        try:
            pre_delete, delete, post_delete = split_delete(chunks)
        except NoDeletes:
            # Nothing found, we've cleaned up the entire doc
            break
        # The stuff-inside-DEL_START/END may not be well balanced
        # markup.  First we figure out what unbalanced portions there are:
        unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
        # Then we move the span forward and/or backward based on these
        # unbalanced portions:
        locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
        locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
        doc = pre_delete
        if doc and not doc[-1].endswith(' '):
            # Fix up case where the word before us didn't have a trailing space
            doc[-1] += ' '
        doc.append('<del>')
        if balanced and balanced[-1].endswith(' '):
            # We move space outside of </del>
            balanced[-1] = balanced[-1][:-1]
        doc.extend(balanced)
        doc.append('</del> ')
        doc.extend(post_delete)
        # Re-scan the rebuilt document: later DEL_STARTs are still pending.
        chunks = doc
    return chunks
def split_unbalanced(chunks):
    """Return (unbalanced_start, balanced, unbalanced_end), where each is
    a list of text and tag chunks.
    unbalanced_start is a list of all the tags that are opened, but
    not closed in this span.  Similarly, unbalanced_end is a list of
    tags that are closed but were not opened.  Extracting these might
    mean some reordering of the chunks."""
    start = []
    end = []
    # Stack of (tag name, placeholder index in ``balanced``, full chunk)
    # for currently-open tags.
    tag_stack = []
    balanced = []
    for chunk in chunks:
        if not chunk.startswith('<'):
            # Plain text is always balanced.
            balanced.append(chunk)
            continue
        endtag = chunk[1] == '/'
        name = chunk.split()[0].strip('<>/')
        if name in empty_tags:
            # Void elements (<br>, <img>, ...) never unbalance anything.
            balanced.append(chunk)
            continue
        if endtag:
            if tag_stack and tag_stack[-1][0] == name:
                # Proper close of the most recent open tag: materialize the
                # placeholder reserved for its start tag.
                balanced.append(chunk)
                name, pos, tag = tag_stack.pop()
                balanced[pos] = tag
            elif tag_stack:
                # Mismatched close: everything still open becomes
                # "opened but never closed".
                start.extend([tag for name, pos, tag in tag_stack])
                tag_stack = []
                end.append(chunk)
            else:
                # Close with no open at all.
                end.append(chunk)
        else:
            # Open tag: reserve a slot in ``balanced`` in case it closes.
            tag_stack.append((name, len(balanced), chunk))
            balanced.append(None)
    # Whatever never closed is unbalanced at the start.
    start.extend(
        [chunk for name, pos, chunk in tag_stack])
    balanced = [chunk for chunk in balanced if chunk is not None]
    return start, balanced, end
def split_delete(chunks):
    """Split *chunks* around the first DEL_START ... DEL_END pair and
    return (before, inside, after).  Later pairs stay inside ``after``.
    Raises NoDeletes when no DEL_START remains.
    """
    if DEL_START not in chunks:
        raise NoDeletes
    start = chunks.index(DEL_START)
    stop = chunks.index(DEL_END)
    return chunks[:start], chunks[start + 1:stop], chunks[stop + 1:]
def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
    """ pre_delete and post_delete implicitly point to a place in the
    document (where the two were split).  This moves that point (by
    popping items from one and pushing them onto the other).  It moves
    the point to try to find a place where unbalanced_start applies.
    As an example::
        >>> unbalanced_start = ['<div>']
        >>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
        >>> pre, post = doc[:3], doc[3:]
        >>> pre, post
        (['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
        >>> locate_unbalanced_start(unbalanced_start, pre, post)
        >>> pre, post
        (['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
    As you can see, we moved the point so that the dangling <div> that
    we found will be effectively replaced by the div in the original
    document.  If this doesn't work out, we just throw away
    unbalanced_start without doing anything.
    """
    while 1:
        if not unbalanced_start:
            # We have totally succeeded in finding the position
            break
        finding = unbalanced_start[0]
        finding_name = finding.split()[0].strip('<>')
        if not post_delete:
            break
        next = post_delete[0]
        if next is DEL_START or not next.startswith('<'):
            # Reached a word, we can't move the delete text forward
            break
        if next[1] == '/':
            # Reached a closing tag, can we go further?  Maybe not...
            break
        name = next.split()[0].strip('<>')
        if name == 'ins':
            # Can't move into an insert
            break
        assert name != 'del', (
            "Unexpected delete tag: %r" % next)
        if name == finding_name:
            # Matching open tag in the document: shift the split point
            # one chunk forward and consume the dangling start tag.
            unbalanced_start.pop(0)
            pre_delete.append(post_delete.pop(0))
        else:
            # Found a tag that doesn't match
            break
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
    """ like locate_unbalanced_start, except handling end tags and
    possibly moving the point earlier in the document. """
    while 1:
        if not unbalanced_end:
            # Success
            break
        finding = unbalanced_end[-1]
        finding_name = finding.split()[0].strip('<>/')
        if not pre_delete:
            break
        next = pre_delete[-1]
        if next is DEL_END or not next.startswith('</'):
            # A word or a start tag
            break
        name = next.split()[0].strip('<>/')
        if name == 'ins' or name == 'del':
            # Can't move into an insert or delete
            break
        if name == finding_name:
            # Matching close tag just before the split: move the split
            # point one chunk back and consume the dangling end tag.
            unbalanced_end.pop()
            post_delete.insert(0, pre_delete.pop())
        else:
            # Found a tag that doesn't match
            break
class token(_unicode):
    """ Represents a diffable token, generally a word that is displayed to
    the user.  Opening tags are attached to this token when they are
    adjacent (pre_tags) and closing tags that follow the word
    (post_tags).  Some exceptions occur when there are empty tags
    adjacent to a word, so there may be close tags in pre_tags, or
    open tags in post_tags.
    We also keep track of whether the word was originally followed by
    whitespace, even though we do not want to treat the word as
    equivalent to a similar word that does not have a trailing
    space."""
    # When this is true, the token will be eliminated from the
    # displayed diff if no change has occurred:
    hide_when_equal = False
    # str/unicode is immutable, so the extra attributes are attached in
    # __new__ rather than __init__.
    def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=""):
        obj = _unicode.__new__(cls, text)
        if pre_tags is not None:
            obj.pre_tags = pre_tags
        else:
            obj.pre_tags = []
        if post_tags is not None:
            obj.post_tags = post_tags
        else:
            obj.post_tags = []
        obj.trailing_whitespace = trailing_whitespace
        return obj
    def __repr__(self):
        return 'token(%s, %r, %r, %r)' % (_unicode.__repr__(self), self.pre_tags,
                                          self.post_tags, self.trailing_whitespace)
    def html(self):
        # The visible form of a plain word token is just its text.
        return _unicode(self)
class tag_token(token):
    """ Represents a token that is actually a tag.  Currently this is just
    the <img> tag, which takes up visible space just like a word but
    is only represented in a document by a tag. """
    def __new__(cls, tag, data, html_repr, pre_tags=None,
                post_tags=None, trailing_whitespace=""):
        # Bug fix: the comparison text used the *builtin* ``type`` instead
        # of the ``tag`` argument, so every tag_token's underlying string
        # was "<class 'type'>: <data>" rather than e.g. "img: <src>".
        obj = token.__new__(cls, "%s: %s" % (tag, data),
                            pre_tags=pre_tags,
                            post_tags=post_tags,
                            trailing_whitespace=trailing_whitespace)
        obj.tag = tag            # element name, e.g. 'img'
        obj.data = data          # the diffable payload, e.g. the src URL
        obj.html_repr = html_repr  # original markup to emit verbatim
        return obj
    def __repr__(self):
        return 'tag_token(%s, %s, html_repr=%s, post_tags=%r, pre_tags=%r, trailing_whitespace=%r)' % (
            self.tag,
            self.data,
            self.html_repr,
            self.pre_tags,
            self.post_tags,
            self.trailing_whitespace)
    def html(self):
        # Tags render as their original markup, not as their comparison text.
        return self.html_repr
class href_token(token):
    """ Represents the href in an anchor tag.  Unlike other words, we only
    show the href when it changes. """
    # Suppressed from the output when both documents share the same href:
    hide_when_equal = True
    def html(self):
        # Leading space separates the note from the preceding link text.
        return ' Link: %s' % self
def tokenize(html, include_hrefs=True):
    """
    Parse the given HTML and returns token objects (words with attached tags).
    This parses only the content of a page; anything in the head is
    ignored, and the <head> and <body> elements are themselves
    optional.  The content is then parsed by lxml, which ensures the
    validity of the resulting parsed document (though lxml may make
    incorrect guesses when the markup is particular bad).
    <ins> and <del> tags are also eliminated from the document, as
    that gets confusing.
    If include_hrefs is true, then the href attribute of <a> tags is
    included as a special kind of diffable token."""
    if etree.iselement(html):
        # Already a parsed lxml element; use it directly.
        body_el = html
    else:
        body_el = parse_html(html, cleanup=True)
    # Then we split the document into text chunks for each tag, word, and end tag:
    chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
    # Finally re-joining them into token objects:
    return fixup_chunks(chunks)
def parse_html(html, cleanup=True):
    """
    Parses an HTML fragment, returning an lxml element.  Note that the
    HTML will be wrapped in a <div> tag that was not in the original
    document.
    If cleanup is true, make sure there's no <head> or <body>, and get
    rid of any <ins> and <del> tags.
    """
    # Strip page structure (<head>/<body>) and <ins>/<del> when requested:
    cleaned = cleanup_html(html) if cleanup else html
    return fragment_fromstring(cleaned, create_parent=True)
# Pre-compiled patterns used by cleanup_html():  case-insensitive so
# <BODY> matches, DOTALL so tags whose attributes span lines still match.
_body_re = re.compile(r'<body.*?>', re.I|re.S)
_end_body_re = re.compile(r'</body.*?>', re.I|re.S)
_ins_del_re = re.compile(r'</?(ins|del).*?>', re.I|re.S)
def cleanup_html(html):
    """ This 'cleans' the HTML, meaning that any page structure is removed
    (only the contents of <body> are used, if there is any <body).
    Also <ins> and <del> tags are removed. """
    body_match = _body_re.search(html)
    if body_match is not None:
        # Keep only what follows the opening <body> tag.
        html = html[body_match.end():]
    end_match = _end_body_re.search(html)
    if end_match is not None:
        # ... and what precedes the closing </body> tag.
        html = html[:end_match.start()]
    return _ins_del_re.sub('', html)
end_whitespace_re = re.compile(r'[ \t\n\r]$')
def split_trailing_whitespace(word):
    """Split *word* into (text, trailing_whitespace); e.g. 'test\\n\\n'
    becomes ('test', '\\n\\n').
    """
    stripped = word.rstrip()
    return stripped, word[len(stripped):]
def fixup_chunks(chunks):
    """
    This function takes a list of chunks and produces a list of tokens.
    Plain word strings become ``token`` objects (markup chunks seen since
    the previous word attach as pre_tags/post_tags); ('img', src, tag)
    and ('href', url) tuples become tag_token/href_token objects.
    """
    tag_accum = []   # markup chunks seen since the last emitted word
    cur_word = None  # most recently emitted token (gets trailing post_tags)
    result = []
    for chunk in chunks:
        if isinstance(chunk, tuple):
            if chunk[0] == 'img':
                src = chunk[1]
                tag, trailing_whitespace = split_trailing_whitespace(chunk[2])
                cur_word = tag_token('img', src, html_repr=tag,
                                     pre_tags=tag_accum,
                                     trailing_whitespace=trailing_whitespace)
                tag_accum = []
                result.append(cur_word)
            elif chunk[0] == 'href':
                href = chunk[1]
                cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=" ")
                tag_accum = []
                result.append(cur_word)
            # Bug fix: this ``continue`` previously sat inside the 'href'
            # branch only, so an 'img' tuple fell through to is_word()
            # below and crashed on tuple.startswith().  Tuples are fully
            # handled above, whatever their kind.
            continue
        if is_word(chunk):
            chunk, trailing_whitespace = split_trailing_whitespace(chunk)
            cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace)
            tag_accum = []
            result.append(cur_word)
        elif is_start_tag(chunk):
            tag_accum.append(chunk)
        elif is_end_tag(chunk):
            if tag_accum:
                tag_accum.append(chunk)
            else:
                # A close tag with no pending markup attaches to the word
                # that precedes it.
                assert cur_word, (
                    "Weird state, cur_word=%r, result=%r, chunks=%r of %r"
                    % (cur_word, result, chunk, chunks))
                cur_word.post_tags.append(chunk)
        else:
            assert(0)
    if not result:
        # Markup-only input: carry the markup on a single empty token.
        return [token('', pre_tags=tag_accum)]
    else:
        # Leftover markup trails the final word.
        result[-1].post_tags.extend(tag_accum)
    return result
# All the tags in HTML that don't require end tags:
empty_tags = (
    'param', 'img', 'area', 'br', 'basefont', 'input',
    'base', 'meta', 'link', 'col')
# Elements that establish a block context; fixup moves <ins>/<del> tags
# inside these so the diffed output stays valid HTML.
block_level_tags = (
    'address',
    'blockquote',
    'center',
    'dir',
    'div',
    'dl',
    'fieldset',
    'form',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'isindex',
    'menu',
    'noframes',
    'noscript',
    'ol',
    'p',
    'pre',
    'table',
    'ul',
    )
# Elements that can only contain block-level content (rows, cells, list
# items, ...); treated like block-level for the <ins>/<del> fixup.
block_level_container_tags = (
    'dd',
    'dt',
    'frameset',
    'li',
    'tbody',
    'td',
    'tfoot',
    'th',
    'thead',
    'tr',
    )
def flatten_el(el, include_hrefs, skip_tag=False):
    """ Takes an lxml element el, and generates all the text chunks for
    that tag.  Each start tag is a chunk, each word is a chunk, and each
    end tag is a chunk.
    If skip_tag is true, then the outermost container tag is
    not returned (just its contents)."""
    if not skip_tag:
        if el.tag == 'img':
            # <img> is a special tuple chunk so it can diff like a word.
            yield ('img', el.get('src'), start_tag(el))
        else:
            yield start_tag(el)
    if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
        # Truly empty void element: the start tag said it all.
        return
    start_words = split_words(el.text)
    for word in start_words:
        yield html_escape(word)
    for child in el:
        # Recurse into children, emitting their chunks in document order.
        for item in flatten_el(child, include_hrefs=include_hrefs):
            yield item
    if el.tag == 'a' and el.get('href') and include_hrefs:
        # hrefs diff as their own special chunk so link changes are noted.
        yield ('href', el.get('href'))
    if not skip_tag:
        yield end_tag(el)
        end_words = split_words(el.tail)
        for word in end_words:
            yield html_escape(word)
split_words_re = re.compile(r'\S+(?:\s+|$)', re.U)
def split_words(text):
    """Break *text* into words, each word keeping whatever whitespace
    immediately follows it.  None or whitespace-only input yields []."""
    if not text or not text.strip():
        return []
    return split_words_re.findall(text)
start_whitespace_re = re.compile(r'^[ \t\n\r]')
def start_tag(el):
    """Render the opening tag for *el*, attributes included and escaped."""
    attrs = ''.join(
        ' %s="%s"' % (attr_name, html_escape(attr_value, True))
        for attr_name, attr_value in el.attrib.items())
    return '<%s%s>' % (el.tag, attrs)
def end_tag(el):
    """Render the closing tag for *el*, plus a trailing space when the
    element's tail text begins with whitespace."""
    extra = ' ' if el.tail and start_whitespace_re.search(el.tail) else ''
    return '</%s>%s' % (el.tag, extra)
def is_word(tok):
    # Anything not opening with '<' is visible text.
    return tok[:1] != '<'
def is_end_tag(tok):
    return tok[:2] == '</'
def is_start_tag(tok):
    return tok[:1] == '<' and tok[:2] != '</'
def fixup_ins_del_tags(html):
    """ Given an html string, move any <ins> or <del> tags inside of any
    block-level elements, e.g. transform <ins><p>word</p></ins> to
    <p><ins>word</ins></p> """
    # Round-trip through lxml: parse, rearrange in place, re-serialize.
    doc = parse_html(html, cleanup=False)
    _fixup_ins_del_tags(doc)
    html = serialize_html_fragment(doc, skip_outer=True)
    return html
def serialize_html_fragment(el, skip_outer=False):
    """Serialize a single lxml element as HTML, including the element's tail.

    When *skip_outer* is true, the outermost tag is stripped from the result
    and surrounding whitespace is trimmed.
    """
    assert not isinstance(el, basestring), (
        "You should pass in an element, not a string like %r" % el)
    html = etree.tostring(el, method="html", encoding=_unicode)
    if not skip_outer:
        return html
    # Drop everything up to and including the opening tag's '>' ...
    inner = html[html.find('>') + 1:]
    # ... and everything from the closing tag's '<' onwards.
    inner = inner[:inner.rfind('<')]
    return inner.strip()
def _fixup_ins_del_tags(doc):
    """In-place version of fixup_ins_del_tags, operating on an lxml tree."""
    for tag in ('ins', 'del'):
        for el in doc.xpath('descendant-or-self::%s' % tag):
            if _contains_block_level_tag(el):
                _move_el_inside_block(el, tag=tag)
                el.drop_tag()
                #_merge_element_contents(el)
def _contains_block_level_tag(el):
    """True if the element, or any descendant, is a block-level element,
    like <p>, <td>, etc."""
    if el.tag in block_level_tags or el.tag in block_level_container_tags:
        return True
    return any(_contains_block_level_tag(child) for child in el)
def _move_el_inside_block(el, tag):
    """Helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
    and moves them inside any block-level tags.

    *el* is modified in place: inline content (text, tails, non-block
    children) is wrapped in new elements of type *tag*, while block-level
    children are recursed into.
    """
    for child in el:
        if _contains_block_level_tag(child):
            break
    else:
        # No block-level tags in any child: wrap all of el's content
        # (leading text plus every child) in a single new <tag> element.
        # (Removed a stray, unused "import sys" that was here.)
        children_tag = etree.Element(tag)
        children_tag.text = el.text
        el.text = None
        children_tag.extend(list(el))
        el[:] = [children_tag]
        return
    for child in list(el):
        if _contains_block_level_tag(child):
            # Block-level child: recurse, then move its tail text into a
            # new <tag> element inserted right after it.
            _move_el_inside_block(child, tag)
            if child.tail:
                tail_tag = etree.Element(tag)
                tail_tag.text = child.tail
                child.tail = None
                el.insert(el.index(child)+1, tail_tag)
        else:
            # Inline child: wrap it whole in a new <tag> element.
            child_tag = etree.Element(tag)
            el.replace(child, child_tag)
            child_tag.append(child)
    if el.text:
        # Leading text becomes the first wrapped <tag> element.
        text_tag = etree.Element(tag)
        text_tag.text = el.text
        el.text = None
        el.insert(0, text_tag)
def _merge_element_contents(el):
    """
    Removes an element, but merges its contents into its place, e.g.,
    given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
    <p>Hi there!</p>
    """
    parent = el.getparent()
    # Text that must be re-attached where el used to start.
    text = el.text or ''
    if el.tail:
        if not len(el):
            # No children: el's tail joins the accumulated text.
            text += el.tail
        else:
            # Children exist: el's tail attaches after el's last child,
            # which will end up in parent at el's position.
            if el[-1].tail:
                el[-1].tail += el.tail
            else:
                el[-1].tail = el.tail
    index = parent.index(el)
    if text:
        # Re-attach the text either to the parent's own text (el was the
        # first child) or to the tail of the preceding sibling.
        if index == 0:
            previous = None
        else:
            previous = parent[index-1]
        if previous is None:
            if parent.text:
                parent.text += text
            else:
                parent.text = text
        else:
            if previous.tail:
                previous.tail += text
            else:
                previous.tail = text
    # Finally replace el itself with its children.
    parent[index:index+1] = el.getchildren()
class InsensitiveSequenceMatcher(difflib.SequenceMatcher):
    """
    Acts like SequenceMatcher, but tries not to find very small equal
    blocks amidst large spans of changes
    """

    # Matches of this size (scaled down for short inputs) are treated as
    # noise and filtered out.
    threshold = 2

    def get_matching_blocks(self):
        """Return the parent's matching blocks, dropping matches at or below
        the threshold. The terminating zero-length block is always kept."""
        # BUG FIX: was min(len(self.b), len(self.b)) — must consider both
        # sequences when scaling the threshold to the input size.
        size = min(len(self.a), len(self.b))
        threshold = min(self.threshold, size // 4)
        actual = difflib.SequenceMatcher.get_matching_blocks(self)
        return [item for item in actual
                if item[2] > threshold
                or not item[2]]
if __name__ == '__main__':
    # Command-line entry point: delegate to lxml's html diff command tool.
    from lxml.html import _diffcommand
    _diffcommand.main()
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2019 Bootlin
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/xylon,logicvc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xylon LogiCVC multi-function device
maintainers:
- Paul Kocialkowski <paul.kocialkowski@bootlin.com>
description: |
The LogiCVC is a display controller that also contains a GPIO controller.
As a result, a multi-function device is exposed as parent of the display
and GPIO blocks.
properties:
compatible:
items:
- enum:
- xylon,logicvc-3.02.a
- const: syscon
- const: simple-mfd
reg:
maxItems: 1
'#address-cells':
const: 1
'#size-cells':
const: 1
select:
properties:
compatible:
contains:
enum:
- xylon,logicvc-3.02.a
required:
- compatible
patternProperties:
"^gpio@[0-9a-f]+$":
$ref: /schemas/gpio/xylon,logicvc-gpio.yaml#
"^display@[0-9a-f]+$":
$ref: /schemas/display/xylon,logicvc-display.yaml#
required:
- compatible
- reg
additionalProperties: false
examples:
- |
logicvc: logicvc@43c00000 {
compatible = "xylon,logicvc-3.02.a", "syscon", "simple-mfd";
reg = <0x43c00000 0x6000>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mfd/xylon,logicvc.yaml |
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Relation(models.Model):
    # Bare target model for the FK/M2M/ForeignObject/GenericRelation fields
    # declared on the *Person models below.
    pass
class AbstractPerson(models.Model):
    """Abstract base declaring one field of every relation kind.

    Field names carry an *_abstract suffix marking their origin; presumably
    these models are fixtures for field-introspection tests — confirm
    against the accompanying test module.
    """
    # DATA fields
    data_abstract = models.CharField(max_length=10)
    fk_abstract = models.ForeignKey(Relation, models.CASCADE, related_name='fk_abstract_rel')
    # M2M fields
    m2m_abstract = models.ManyToManyField(Relation, related_name='m2m_abstract_rel')
    friends_abstract = models.ManyToManyField('self', related_name='friends_abstract', symmetrical=True)
    following_abstract = models.ManyToManyField('self', related_name='followers_abstract', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_abstract = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['abstract_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_abstract_rel',
    )
    # GFK fields
    content_type_abstract = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_abstract = models.PositiveIntegerField()
    content_object_abstract = GenericForeignKey('content_type_abstract', 'object_id_abstract')
    # GR fields
    generic_relation_abstract = GenericRelation(Relation)

    class Meta:
        abstract = True
class BasePerson(AbstractPerson):
    """Concrete base inheriting AbstractPerson's fields and adding the same
    set of field kinds again, suffixed *_base."""
    # DATA fields
    data_base = models.CharField(max_length=10)
    fk_base = models.ForeignKey(Relation, models.CASCADE, related_name='fk_base_rel')
    # M2M fields
    m2m_base = models.ManyToManyField(Relation, related_name='m2m_base_rel')
    friends_base = models.ManyToManyField('self', related_name='friends_base', symmetrical=True)
    following_base = models.ManyToManyField('self', related_name='followers_base', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_base = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['base_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_base_rel',
    )
    # GFK fields
    content_type_base = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_base = models.PositiveIntegerField()
    content_object_base = GenericForeignKey('content_type_base', 'object_id_base')
    # GR fields
    generic_relation_base = GenericRelation(Relation)
class Person(BasePerson):
    """Concrete child of BasePerson (multi-table inheritance), adding the
    field kinds once more with *_inherited/*_concrete suffixes."""
    # DATA fields
    data_inherited = models.CharField(max_length=10)
    fk_inherited = models.ForeignKey(Relation, models.CASCADE, related_name='fk_concrete_rel')
    # M2M Fields
    m2m_inherited = models.ManyToManyField(Relation, related_name='m2m_concrete_rel')
    friends_inherited = models.ManyToManyField('self', related_name='friends_concrete', symmetrical=True)
    following_inherited = models.ManyToManyField('self', related_name='followers_concrete', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_inherited = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['model_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_concrete_rel',
    )
    # GFK fields
    content_type_concrete = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_concrete = models.PositiveIntegerField()
    content_object_concrete = GenericForeignKey('content_type_concrete', 'object_id_concrete')
    # GR fields
    generic_relation_concrete = GenericRelation(Relation)
class ProxyPerson(Person):
    # Proxy of Person: no new table or fields, same underlying model.
    class Meta:
        proxy = True
class PersonThroughProxySubclass(ProxyPerson):
    # Concrete subclass whose inheritance chain passes through a proxy model.
    pass
class Relating(models.Model):
    """Declares visible and hidden (related_name ending in '+') FK and M2M
    relations to BasePerson, Person and ProxyPerson."""
    # ForeignKey to BasePerson
    baseperson = models.ForeignKey(BasePerson, models.CASCADE, related_name='relating_baseperson')
    baseperson_hidden = models.ForeignKey(BasePerson, models.CASCADE, related_name='+')
    # ForeignKey to Person
    person = models.ForeignKey(Person, models.CASCADE, related_name='relating_person')
    person_hidden = models.ForeignKey(Person, models.CASCADE, related_name='+')
    # ForeignKey to ProxyPerson
    proxyperson = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson')
    # NOTE: the trailing '+' hides the reverse accessor despite the name.
    proxyperson_hidden = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson_hidden+')
    # ManyToManyField to BasePerson
    basepeople = models.ManyToManyField(BasePerson, related_name='relating_basepeople')
    basepeople_hidden = models.ManyToManyField(BasePerson, related_name='+')
    # ManyToManyField to Person
    people = models.ManyToManyField(Person, related_name='relating_people')
    people_hidden = models.ManyToManyField(Person, related_name='+')
# ParentListTests models
class CommonAncestor(models.Model):
    # Shared ancestor for the diamond-inheritance models below.
    pass
class FirstParent(CommonAncestor):
    # Explicit parent link to CommonAncestor. NOTE(review): SET_NULL on a
    # primary-key parent link is unusual — presumably intentional for these
    # parent-list tests; confirm before reusing this pattern.
    first_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class SecondParent(CommonAncestor):
    # Second explicit parent link, mirroring FirstParent.
    second_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class Child(FirstParent, SecondParent):
    # Diamond inheritance: both parents link back to CommonAncestor.
    pass
/* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
Without limiting anything contained in the foregoing, this file,
which is part of C Driver for MySQL (Connector/C), is also subject to the
Universal FOSS Exception, version 1.0, a copy of which can be found at
http://oss.oracle.com/licenses/universal-foss-exception.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/**
@file include/mysql_com.h
Common definition between mysql server & client.
*/
#ifndef _mysql_com_h
#define _mysql_com_h
#ifndef MYSQL_ABI_CHECK
#include <stdbool.h>
#include <stdint.h>
#endif
#include "my_command.h"
#include "my_compress.h"
/*
We need a definition for my_socket. On the client, <mysql.h> already provides
it, but on the server side, we need to get it from a header.
*/
#ifndef my_socket_defined
#include "my_io.h"
#include "mysql/components/services/bits/my_io_bits.h"
#endif
#ifndef MYSQL_ABI_CHECK
#include <stdbool.h>
#endif
#define SYSTEM_CHARSET_MBMAXLEN 3
#define FILENAME_CHARSET_MBMAXLEN 5
#define NAME_CHAR_LEN 64 /**< Field/table name length */
#define PARTITION_EXPR_CHAR_LEN \
2048 /**< Maximum expression length in chars \
*/
#define USERNAME_CHAR_LENGTH 32
#define USERNAME_CHAR_LENGTH_STR "32"
#ifndef NAME_LEN
#define NAME_LEN (NAME_CHAR_LEN * SYSTEM_CHARSET_MBMAXLEN)
#endif
#define USERNAME_LENGTH (USERNAME_CHAR_LENGTH * SYSTEM_CHARSET_MBMAXLEN)
#define CONNECT_STRING_MAXLEN 1024
#define MYSQL_AUTODETECT_CHARSET_NAME "auto"
#define SERVER_VERSION_LENGTH 60
#define SQLSTATE_LENGTH 5
/*
In FIDO terminology, relying party is the server where required services are
running. Relying party ID is unique name given to server.
*/
#define RELYING_PARTY_ID_LENGTH 255
/* Length of random salt sent during fido registration */
#define CHALLENGE_LENGTH 32
/* Maximum authentication factors server supports */
#define MAX_AUTH_FACTORS 3
/**
Maximum length of comments
pre 5.6: 60 characters
*/
#define TABLE_COMMENT_INLINE_MAXLEN 180
#define TABLE_COMMENT_MAXLEN 2048
#define COLUMN_COMMENT_MAXLEN 1024
#define INDEX_COMMENT_MAXLEN 1024
#define TABLE_PARTITION_COMMENT_MAXLEN 1024
#define TABLESPACE_COMMENT_MAXLEN 2048
/**
Maximum length of protocol packet.
@ref page_protocol_basic_ok_packet length limit also restricted to this value
as any length greater than this value will have first byte of
@ref page_protocol_basic_ok_packet to be 254 thus does not
provide a means to identify if this is @ref page_protocol_basic_ok_packet or
@ref page_protocol_basic_eof_packet.
*/
#define MAX_PACKET_LENGTH (256L * 256L * 256L - 1)
#define LOCAL_HOST "localhost"
#define LOCAL_HOST_NAMEDPIPE "."
#if defined(_WIN32)
#define MYSQL_NAMEDPIPE "MySQL"
#define MYSQL_SERVICENAME "MySQL"
#endif /* _WIN32 */
/** The length of the header part for each generated column in the .frm file.*/
#define FRM_GCOL_HEADER_SIZE 4
/**
  Maximum length of the expression statement defined for generated columns.
*/
/* BUG FIX: the macro body must be parenthesized, otherwise expressions such
   as `2 * GENERATED_COLUMN_EXPRESSION_MAXLEN` expand incorrectly. */
#define GENERATED_COLUMN_EXPRESSION_MAXLEN (65535 - FRM_GCOL_HEADER_SIZE)
/**
Length of random string sent by server on handshake; this is also length of
obfuscated password, received from client
*/
#define SCRAMBLE_LENGTH 20
#define AUTH_PLUGIN_DATA_PART_1_LENGTH 8
/** length of password stored in the db: new passwords are preceded with '*'*/
#define SCRAMBLED_PASSWORD_CHAR_LENGTH (SCRAMBLE_LENGTH * 2 + 1)
/**
@defgroup group_cs_column_definition_flags Column Definition Flags
@ingroup group_cs
@brief Values for the flags bitmask used by ::Send_field:flags
Currently need to fit into 32 bits.
Each bit represents an optional feature of the protocol.
Both the client and the server are sending these.
The intersection of the two determines what optional parts of the
protocol will be used.
*/
/**
@addtogroup group_cs_column_definition_flags
@{
*/
#define NOT_NULL_FLAG 1 /**< Field can't be NULL */
#define PRI_KEY_FLAG 2 /**< Field is part of a primary key */
#define UNIQUE_KEY_FLAG 4 /**< Field is part of a unique key */
#define MULTIPLE_KEY_FLAG 8 /**< Field is part of a key */
#define BLOB_FLAG 16 /**< Field is a blob */
#define UNSIGNED_FLAG 32 /**< Field is unsigned */
#define ZEROFILL_FLAG 64 /**< Field is zerofill */
#define BINARY_FLAG 128 /**< Field is binary */
/* The following are only sent to new clients */
#define ENUM_FLAG 256 /**< field is an enum */
#define AUTO_INCREMENT_FLAG 512 /**< field is a autoincrement field */
#define TIMESTAMP_FLAG 1024 /**< Field is a timestamp */
#define SET_FLAG 2048 /**< field is a set */
#define NO_DEFAULT_VALUE_FLAG 4096 /**< Field doesn't have default value */
#define ON_UPDATE_NOW_FLAG 8192 /**< Field is set to NOW on UPDATE */
#define PART_KEY_FLAG 16384 /**< Intern; Part of some key */
#define NUM_FLAG 32768 /**< Field is num (for clients) */
#define UNIQUE_FLAG 65536 /**< Intern: Used by sql_yacc */
#define BINCMP_FLAG 131072 /**< Intern: Used by sql_yacc */
#define GET_FIXED_FIELDS_FLAG \
(1 << 18) /**< Used to get fields in item tree \
*/
#define FIELD_IN_PART_FUNC_FLAG (1 << 19) /**< Field part of partition func */
/**
Intern: Field in TABLE object for new version of altered table,
which participates in a newly added index.
*/
#define FIELD_IN_ADD_INDEX (1 << 20)
#define FIELD_IS_RENAMED (1 << 21) /**< Intern: Field is being renamed */
#define FIELD_FLAGS_STORAGE_MEDIA 22 /**< Field storage media, bit 22-23 */
#define FIELD_FLAGS_STORAGE_MEDIA_MASK (3 << FIELD_FLAGS_STORAGE_MEDIA)
#define FIELD_FLAGS_COLUMN_FORMAT 24 /**< Field column format, bit 24-25 */
#define FIELD_FLAGS_COLUMN_FORMAT_MASK (3 << FIELD_FLAGS_COLUMN_FORMAT)
#define FIELD_IS_DROPPED (1 << 26) /**< Intern: Field is being dropped */
#define EXPLICIT_NULL_FLAG \
(1 << 27) /**< Field is explicitly specified as \
NULL by the user */
#define GROUP_FLAG \
(1 << 28) /**< Intern: Group field. Transient use in \
create_tmp_table */
/** Field will not be loaded in secondary engine. */
#define NOT_SECONDARY_FLAG (1 << 29)
/** Field is explicitly marked as invisible by the user. */
#define FIELD_IS_INVISIBLE (1 << 30)
/** @}*/
/**
@defgroup group_cs_com_refresh_flags COM_REFRESH Flags
@ingroup group_cs
@brief Values for the `sub_command` in ::COM_REFRESH
Currently the protocol carries only 8 bits of these flags.
The rest (8-end) are used only internally in the server.
*/
/**
@addtogroup group_cs_com_refresh_flags
@{
*/
#define REFRESH_GRANT 1 /**< Refresh grant tables, FLUSH PRIVILEGES */
#define REFRESH_LOG 2 /**< Start on new log file, FLUSH LOGS */
#define REFRESH_TABLES 4 /**< close all tables, FLUSH TABLES */
#define UNUSED_8 \
8 /**< Previously REFRESH_HOSTS but not used anymore. Use TRUNCATE TABLE \
performance_schema.host_cache instead */
#define REFRESH_STATUS 16 /**< Flush status variables, FLUSH STATUS */
#define UNUSED_32 32 /**< Removed. Used to be flush thread cache */
#define REFRESH_REPLICA \
64 /**< Reset source info and restart replica \
thread, RESET REPLICA */
#define REFRESH_SOURCE \
128 /**< Remove all bin logs in the index \
and truncate the index. Also resets \
GTID information. Command: \
RESET BINARY LOGS AND GTIDS */
#define REFRESH_ERROR_LOG 256 /**< Rotate only the error log */
#define REFRESH_ENGINE_LOG 512 /**< Flush all storage engine logs */
#define REFRESH_BINARY_LOG 1024 /**< Flush the binary log */
#define REFRESH_RELAY_LOG 2048 /**< Flush the relay log */
#define REFRESH_GENERAL_LOG 4096 /**< Flush the general log */
#define REFRESH_SLOW_LOG 8192 /**< Flush the slow query log */
#define REFRESH_READ_LOCK 16384 /**< Lock tables for read. */
/**
Wait for an impending flush before closing the tables.
@sa REFRESH_READ_LOCK, handle_reload_request, close_cached_tables
*/
#define REFRESH_FAST 32768
#define REFRESH_USER_RESOURCES \
0x80000L /** FLUSH RESOURCES. @sa ::reset_mqh \
*/
#define REFRESH_FOR_EXPORT 0x100000L /** FLUSH TABLES ... FOR EXPORT */
#define REFRESH_OPTIMIZER_COSTS 0x200000L /** FLUSH OPTIMIZER_COSTS */
#define REFRESH_PERSIST 0x400000L /** RESET PERSIST */
/** @}*/
/**
@defgroup group_cs_capabilities_flags Capabilities Flags
@ingroup group_cs
@brief Values for the capabilities flag bitmask used by the MySQL protocol
Currently need to fit into 32 bits.
Each bit represents an optional feature of the protocol.
Both the client and the server are sending these.
  The intersection of the two determines what optional parts of the
protocol will be used.
*/
/**
@addtogroup group_cs_capabilities_flags
@{
*/
/**
Use the improved version of Old Password Authentication.
Not used.
@note Assumed to be set since 4.1.1.
*/
#define CLIENT_LONG_PASSWORD 1
/**
Send found rows instead of affected rows in @ref
page_protocol_basic_eof_packet
*/
#define CLIENT_FOUND_ROWS 2
/**
@brief Get all column flags
Longer flags in Protocol::ColumnDefinition320.
@todo Reference Protocol::ColumnDefinition320
Server
------
Supports longer flags.
Client
------
Expects longer flags.
*/
#define CLIENT_LONG_FLAG 4
/**
Database (schema) name can be specified on connect in Handshake Response
Packet.
@todo Reference Handshake Response Packet.
Server
------
Supports schema-name in Handshake Response Packet.
Client
------
Handshake Response Packet contains a schema-name.
@sa send_client_reply_packet()
*/
#define CLIENT_CONNECT_WITH_DB 8
#define CLIENT_NO_SCHEMA \
16 /**< DEPRECATED: Don't allow database.table.column */
/**
Compression protocol supported.
@todo Reference Compression
Server
------
Supports compression.
Client
------
Switches to Compression compressed protocol after successful authentication.
*/
#define CLIENT_COMPRESS 32
/**
Special handling of ODBC behavior.
@note No special behavior since 3.22.
*/
#define CLIENT_ODBC 64
/**
Can use LOAD DATA LOCAL.
Server
------
Enables the LOCAL INFILE request of LOAD DATA|XML.
Client
------
Will handle LOCAL INFILE request.
*/
#define CLIENT_LOCAL_FILES 128
/**
Ignore spaces before '('
Server
------
Parser can ignore spaces before '('.
Client
------
Let the parser ignore spaces before '('.
*/
#define CLIENT_IGNORE_SPACE 256
/**
New 4.1 protocol
@todo Reference the new 4.1 protocol
Server
------
Supports the 4.1 protocol.
Client
------
Uses the 4.1 protocol.
@note this value was CLIENT_CHANGE_USER in 3.22, unused in 4.0
*/
#define CLIENT_PROTOCOL_41 512
/**
This is an interactive client
Use @ref System_variables::net_wait_timeout
versus @ref System_variables::net_interactive_timeout.
Server
------
Supports interactive and noninteractive clients.
Client
------
Client is interactive.
@sa mysql_real_connect()
*/
#define CLIENT_INTERACTIVE 1024
/**
Use SSL encryption for the session
@todo Reference SSL
Server
------
Supports SSL
Client
------
Switch to SSL after sending the capability-flags.
*/
#define CLIENT_SSL 2048
/**
Client only flag. Not used.
Client
------
Do not issue SIGPIPE if network failures occur (libmysqlclient only).
@sa mysql_real_connect()
*/
#define CLIENT_IGNORE_SIGPIPE 4096
/**
Client knows about transactions
Server
------
Can send status flags in @ref page_protocol_basic_ok_packet /
@ref page_protocol_basic_eof_packet.
Client
------
Expects status flags in @ref page_protocol_basic_ok_packet /
@ref page_protocol_basic_eof_packet.
@note This flag is optional in 3.23, but always set by the server since 4.0.
@sa send_server_handshake_packet(), parse_client_handshake_packet(),
net_send_ok(), net_send_eof()
*/
#define CLIENT_TRANSACTIONS 8192
#define CLIENT_RESERVED 16384 /**< DEPRECATED: Old flag for 4.1 protocol */
#define CLIENT_RESERVED2 \
32768 /**< DEPRECATED: Old flag for 4.1 authentication \
CLIENT_SECURE_CONNECTION */
/**
Enable/disable multi-stmt support
Also sets @ref CLIENT_MULTI_RESULTS. Currently not checked anywhere.
Server
------
Can handle multiple statements per COM_QUERY and COM_STMT_PREPARE.
Client
-------
May send multiple statements per COM_QUERY and COM_STMT_PREPARE.
@note Was named ::CLIENT_MULTI_QUERIES in 4.1.0, renamed later.
Requires
--------
::CLIENT_PROTOCOL_41
@todo Reference COM_QUERY and COM_STMT_PREPARE
*/
#define CLIENT_MULTI_STATEMENTS (1UL << 16)
/**
Enable/disable multi-results
Server
------
Can send multiple resultsets for COM_QUERY.
Error if the server needs to send them and client
does not support them.
Client
-------
Can handle multiple resultsets for COM_QUERY.
Requires
--------
::CLIENT_PROTOCOL_41
@sa mysql_execute_command(), sp_head::MULTI_RESULTS
*/
#define CLIENT_MULTI_RESULTS (1UL << 17)
/**
Multi-results and OUT parameters in PS-protocol.
Server
------
Can send multiple resultsets for COM_STMT_EXECUTE.
Client
------
Can handle multiple resultsets for COM_STMT_EXECUTE.
Requires
--------
::CLIENT_PROTOCOL_41
@todo Reference COM_STMT_EXECUTE and PS-protocol
@sa Protocol_binary::send_out_parameters
*/
#define CLIENT_PS_MULTI_RESULTS (1UL << 18)
/**
Client supports plugin authentication
Server
------
Sends extra data in Initial Handshake Packet and supports the pluggable
authentication protocol.
Client
------
Supports authentication plugins.
Requires
--------
::CLIENT_PROTOCOL_41
@todo Reference plugin authentication, Initial Handshake Packet,
Authentication plugins
@sa send_change_user_packet(), send_client_reply_packet(), run_plugin_auth(),
parse_com_change_user_packet(), parse_client_handshake_packet()
*/
#define CLIENT_PLUGIN_AUTH (1UL << 19)
/**
Client supports connection attributes
Server
------
Permits connection attributes in Protocol::HandshakeResponse41.
Client
------
Sends connection attributes in Protocol::HandshakeResponse41.
@todo Reference Protocol::HandshakeResponse41
@sa send_client_connect_attrs(), read_client_connect_attrs()
*/
#define CLIENT_CONNECT_ATTRS (1UL << 20)
/**
Enable authentication response packet to be larger than 255 bytes.
  The ability to change the default plugin requires that the initial password
  field in the Protocol::HandshakeResponse41 packet can be of arbitrary size.
However, the 4.1 client-server protocol limits the length of the
auth-data-field sent from client to server to 255 bytes.
The solution is to change the type of the field to a true length encoded
string and indicate the protocol change
with this client capability flag.
Server
------
Understands length-encoded integer for auth response data in
Protocol::HandshakeResponse41.
Client
------
Length of auth response data in Protocol::HandshakeResponse41
is a length-encoded integer.
@todo Reference Protocol::HandshakeResponse41
@note The flag was introduced in 5.6.6, but had the wrong value.
@sa send_client_reply_packet(), parse_client_handshake_packet(),
get_56_lenc_string(), get_41_lenc_string()
*/
#define CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA (1UL << 21)
/**
Don't close the connection for a user account with expired password.
Server
------
Announces support for expired password extension.
Client
------
Can handle expired passwords.
@todo Reference expired password
@sa MYSQL_OPT_CAN_HANDLE_EXPIRED_PASSWORDS, disconnect_on_expired_password
ACL_USER::password_expired, check_password_lifetime(), acl_authenticate()
*/
#define CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS (1UL << 22)
/**
  Capable of handling server state change information. It's a hint to the
server to include the state change information in
@ref page_protocol_basic_ok_packet.
Server
------
Can set ::SERVER_SESSION_STATE_CHANGED in the ::SERVER_STATUS_flags_enum
and send @ref sect_protocol_basic_ok_packet_sessinfo in a
@ref page_protocol_basic_ok_packet.
Client
------
Expects the server to send @ref sect_protocol_basic_ok_packet_sessinfo in
a @ref page_protocol_basic_ok_packet.
@sa enum_session_state_type, read_ok_ex(), net_send_ok(), Session_tracker,
State_tracker
*/
#define CLIENT_SESSION_TRACK (1UL << 23)
/**
Client no longer needs @ref page_protocol_basic_eof_packet and will
use @ref page_protocol_basic_ok_packet instead.
@sa net_send_ok()
Server
------
Can send OK after a Text Resultset.
Client
------
Expects an @ref page_protocol_basic_ok_packet (instead of
@ref page_protocol_basic_eof_packet) after the resultset rows of a
Text Resultset.
Background
----------
To support ::CLIENT_SESSION_TRACK, additional information must be sent after
all successful commands. Although the @ref page_protocol_basic_ok_packet is
extensible, the @ref page_protocol_basic_eof_packet is not due to the overlap
of its bytes with the content of the Text Resultset Row.
Therefore, the @ref page_protocol_basic_eof_packet in the
Text Resultset is replaced with an @ref page_protocol_basic_ok_packet.
@ref page_protocol_basic_eof_packet is deprecated as of MySQL 5.7.5.
@todo Reference Text Resultset
@sa cli_safe_read_with_ok(), read_ok_ex(), net_send_ok(), net_send_eof()
*/
#define CLIENT_DEPRECATE_EOF (1UL << 24)
/**
The client can handle optional metadata information in the resultset.
*/
#define CLIENT_OPTIONAL_RESULTSET_METADATA (1UL << 25)
/**
Compression protocol extended to support zstd compression method
This capability flag is used to send zstd compression level between
client and server provided both client and server are enabled with
this flag.
Server
------
Server sets this flag when global variable protocol-compression-algorithms
has zstd in its list of supported values.
Client
------
Client sets this flag when it is configured to use zstd compression method.
*/
#define CLIENT_ZSTD_COMPRESSION_ALGORITHM (1UL << 26)
/**
Support optional extension for query parameters into the @ref
page_protocol_com_query and @ref page_protocol_com_stmt_execute packets.
Server
------
Expects an optional part containing the query parameter set(s). Executes the
query for each set of parameters or returns an error if more than 1 set of
parameters is sent and the server can't execute it.
Client
------
Can send the optional part containing the query parameter set(s).
*/
#define CLIENT_QUERY_ATTRIBUTES (1UL << 27)
/**
Support Multi factor authentication.
Server
------
Server sends AuthNextFactor packet after every nth factor authentication
method succeeds, except the last factor authentication.
Client
------
Client reads AuthNextFactor packet sent by server and initiates next factor
authentication method.
*/
#define MULTI_FACTOR_AUTHENTICATION (1UL << 28)
/**
This flag will be reserved to extend the 32bit capabilities structure to
64bits.
*/
#define CLIENT_CAPABILITY_EXTENSION (1UL << 29)
/**
Verify server certificate.
Client only flag.
@deprecated in favor of --ssl-mode.
*/
#define CLIENT_SSL_VERIFY_SERVER_CERT (1UL << 30)
/**
Don't reset the options after an unsuccessful connect
Client only flag.
Typically passed via ::mysql_real_connect() 's client_flag parameter.
@sa mysql_real_connect()
*/
#define CLIENT_REMEMBER_OPTIONS (1UL << 31)
/** @}*/
/** a compatibility alias for CLIENT_COMPRESS */
#define CAN_CLIENT_COMPRESS CLIENT_COMPRESS
/** Gather all possible capabilities (flags) supported by the server */
#define CLIENT_ALL_FLAGS \
(CLIENT_LONG_PASSWORD | CLIENT_FOUND_ROWS | CLIENT_LONG_FLAG | \
CLIENT_CONNECT_WITH_DB | CLIENT_NO_SCHEMA | CLIENT_COMPRESS | CLIENT_ODBC | \
CLIENT_LOCAL_FILES | CLIENT_IGNORE_SPACE | CLIENT_PROTOCOL_41 | \
CLIENT_INTERACTIVE | CLIENT_SSL | CLIENT_IGNORE_SIGPIPE | \
CLIENT_TRANSACTIONS | CLIENT_RESERVED | CLIENT_RESERVED2 | \
CLIENT_MULTI_STATEMENTS | CLIENT_MULTI_RESULTS | CLIENT_PS_MULTI_RESULTS | \
CLIENT_SSL_VERIFY_SERVER_CERT | CLIENT_REMEMBER_OPTIONS | \
CLIENT_PLUGIN_AUTH | CLIENT_CONNECT_ATTRS | \
CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | \
CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS | CLIENT_SESSION_TRACK | \
CLIENT_DEPRECATE_EOF | CLIENT_OPTIONAL_RESULTSET_METADATA | \
CLIENT_ZSTD_COMPRESSION_ALGORITHM | CLIENT_QUERY_ATTRIBUTES | \
MULTI_FACTOR_AUTHENTICATION)
/**
Switch off from ::CLIENT_ALL_FLAGS the flags that are optional and
depending on build flags.
If any of the optional flags is supported by the build it will be switched
on before sending to the client during the connection handshake.
*/
#define CLIENT_BASIC_FLAGS \
(CLIENT_ALL_FLAGS & \
~(CLIENT_SSL | CLIENT_COMPRESS | CLIENT_SSL_VERIFY_SERVER_CERT | \
CLIENT_ZSTD_COMPRESSION_ALGORITHM))
/** The status flags are a bit-field */
enum SERVER_STATUS_flags_enum {
  /**
    Is raised when a multi-statement transaction
    has been started, either explicitly, by means
    of BEGIN or COMMIT AND CHAIN, or
    implicitly, by the first transactional
    statement, when autocommit=off.
  */
  SERVER_STATUS_IN_TRANS = 1,
  SERVER_STATUS_AUTOCOMMIT = 2,   /**< Server in auto_commit mode */
  SERVER_MORE_RESULTS_EXISTS = 8, /**< Multi query - next query exists */
  SERVER_QUERY_NO_GOOD_INDEX_USED = 16, /**< No good index used for query */
  SERVER_QUERY_NO_INDEX_USED = 32,      /**< No index used for query */
  /**
    The server was able to fulfill the clients request and opened a
    read-only non-scrollable cursor for a query. This flag comes
    in reply to COM_STMT_EXECUTE and COM_STMT_FETCH commands.
    Used by Binary Protocol Resultset to signal that COM_STMT_FETCH
    must be used to fetch the row-data.
    @todo Refify "Binary Protocol Resultset" and "COM_STMT_FETCH".
  */
  SERVER_STATUS_CURSOR_EXISTS = 64,
  /**
    This flag is sent when a read-only cursor is exhausted, in reply to
    COM_STMT_FETCH command.
  */
  SERVER_STATUS_LAST_ROW_SENT = 128,
  SERVER_STATUS_DB_DROPPED = 256, /**< A database was dropped */
  /** Backslash is not treated as an escape character. */
  SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512,
  /**
    Sent to the client if after a prepared statement reprepare
    we discovered that the new statement returns a different
    number of result set columns.
  */
  SERVER_STATUS_METADATA_CHANGED = 1024,
  SERVER_QUERY_WAS_SLOW = 2048, /**< The last query was flagged as slow */
  /**
    To mark ResultSet containing output parameter values.
  */
  SERVER_PS_OUT_PARAMS = 4096,
  /**
    Set at the same time as SERVER_STATUS_IN_TRANS if the started
    multi-statement transaction is a read-only transaction. Cleared
    when the transaction commits or aborts. Since this flag is sent
    to clients in OK and EOF packets, the flag indicates the
    transaction status at the end of command execution.
  */
  SERVER_STATUS_IN_TRANS_READONLY = 8192,
  /**
    This status flag, when on, implies that one of the state information has
    changed on the server because of the execution of the last statement.
  */
  SERVER_SESSION_STATE_CHANGED = (1UL << 14)
};
/**
Server status flags that must be cleared when starting
execution of a new SQL statement.
Flags from this set are only added to the
current server status by the execution engine, but
never removed -- the execution engine expects them
to disappear automagically by the next command.
*/
#define SERVER_STATUS_CLEAR_SET \
(SERVER_QUERY_NO_GOOD_INDEX_USED | SERVER_QUERY_NO_INDEX_USED | \
SERVER_MORE_RESULTS_EXISTS | SERVER_STATUS_METADATA_CHANGED | \
SERVER_QUERY_WAS_SLOW | SERVER_STATUS_DB_DROPPED | \
SERVER_STATUS_CURSOR_EXISTS | SERVER_STATUS_LAST_ROW_SENT | \
SERVER_SESSION_STATE_CHANGED)
/** Max length of a error message. Should be kept in sync with ::ERRMSGSIZE. */
#define MYSQL_ERRMSG_SIZE 512
#define NET_READ_TIMEOUT 30 /**< Timeout on read */
#define NET_WRITE_TIMEOUT 60 /**< Timeout on write */
#define NET_WAIT_TIMEOUT 8 * 60 * 60 /**< Wait for new query */
/**
Flag used by the parser. Kill only the query and not the connection.
@sa SQLCOM_KILL, sql_kill(), LEX::type
*/
#define ONLY_KILL_QUERY 1
#ifndef MYSQL_VIO
struct Vio;
#define MYSQL_VIO struct Vio *
#endif
#define MAX_TINYINT_WIDTH 3 /**< Max width for a TINY w.o. sign */
#define MAX_SMALLINT_WIDTH 5 /**< Max width for a SHORT w.o. sign */
#define MAX_MEDIUMINT_WIDTH 8 /**< Max width for a INT24 w.o. sign */
#define MAX_INT_WIDTH 10 /**< Max width for a LONG w.o. sign */
#define MAX_BIGINT_WIDTH 20 /**< Max width for a LONGLONG */
/// Max width for a CHAR column, in number of characters
#define MAX_CHAR_WIDTH 255
/// Default width for blob in bytes @todo - align this with sizes from field.h
#define MAX_BLOB_WIDTH 16777216
#define NET_ERROR_UNSET 0 /**< No error has occurred yet */
#define NET_ERROR_SOCKET_RECOVERABLE 1 /**< Socket still usable */
#define NET_ERROR_SOCKET_UNUSABLE 2 /**< Do not use the socket */
#define NET_ERROR_SOCKET_NOT_READABLE 3 /**< Try write and close socket */
#define NET_ERROR_SOCKET_NOT_WRITABLE 4 /**< Try read and close socket */
typedef struct NET {
MYSQL_VIO vio;
unsigned char *buff, *buff_end, *write_pos, *read_pos;
my_socket fd; /* For Perl DBI/dbd */
/**
Set if we are doing several queries in one
command ( as in LOAD TABLE ... FROM MASTER ),
and do not want to confuse the client with OK at the wrong time
*/
unsigned long remain_in_buf, length, buf_length, where_b;
unsigned long max_packet, max_packet_size;
unsigned int pkt_nr, compress_pkt_nr;
unsigned int write_timeout, read_timeout, retry_count;
int fcntl;
unsigned int *return_status;
unsigned char reading_or_writing;
unsigned char save_char;
bool compress;
unsigned int last_errno;
unsigned char error;
/** Client library error message buffer. Actually belongs to struct MYSQL. */
char last_error[MYSQL_ERRMSG_SIZE];
/** Client library sqlstate buffer. Set along with the error message. */
char sqlstate[SQLSTATE_LENGTH + 1];
/**
Extension pointer, for the caller private use.
Any program linking with the networking library can use this pointer,
which is handy when private connection specific data needs to be
maintained.
The mysqld server process uses this pointer internally,
to maintain the server internal instrumentation for the connection.
*/
void *extension;
} NET;
#define packet_error (~(unsigned long)0)
/**
@addtogroup group_cs_backward_compatibility Backward compatibility
@ingroup group_cs
@{
*/
#define CLIENT_MULTI_QUERIES CLIENT_MULTI_STATEMENTS
#define FIELD_TYPE_DECIMAL MYSQL_TYPE_DECIMAL
#define FIELD_TYPE_NEWDECIMAL MYSQL_TYPE_NEWDECIMAL
#define FIELD_TYPE_TINY MYSQL_TYPE_TINY
#define FIELD_TYPE_SHORT MYSQL_TYPE_SHORT
#define FIELD_TYPE_LONG MYSQL_TYPE_LONG
#define FIELD_TYPE_FLOAT MYSQL_TYPE_FLOAT
#define FIELD_TYPE_DOUBLE MYSQL_TYPE_DOUBLE
#define FIELD_TYPE_NULL MYSQL_TYPE_NULL
#define FIELD_TYPE_TIMESTAMP MYSQL_TYPE_TIMESTAMP
#define FIELD_TYPE_LONGLONG MYSQL_TYPE_LONGLONG
#define FIELD_TYPE_INT24 MYSQL_TYPE_INT24
#define FIELD_TYPE_DATE MYSQL_TYPE_DATE
#define FIELD_TYPE_TIME MYSQL_TYPE_TIME
#define FIELD_TYPE_DATETIME MYSQL_TYPE_DATETIME
#define FIELD_TYPE_YEAR MYSQL_TYPE_YEAR
#define FIELD_TYPE_NEWDATE MYSQL_TYPE_NEWDATE
#define FIELD_TYPE_ENUM MYSQL_TYPE_ENUM
#define FIELD_TYPE_SET MYSQL_TYPE_SET
#define FIELD_TYPE_TINY_BLOB MYSQL_TYPE_TINY_BLOB
#define FIELD_TYPE_MEDIUM_BLOB MYSQL_TYPE_MEDIUM_BLOB
#define FIELD_TYPE_LONG_BLOB MYSQL_TYPE_LONG_BLOB
#define FIELD_TYPE_BLOB MYSQL_TYPE_BLOB
#define FIELD_TYPE_VAR_STRING MYSQL_TYPE_VAR_STRING
#define FIELD_TYPE_STRING MYSQL_TYPE_STRING
#define FIELD_TYPE_CHAR MYSQL_TYPE_TINY
#define FIELD_TYPE_INTERVAL MYSQL_TYPE_ENUM
#define FIELD_TYPE_GEOMETRY MYSQL_TYPE_GEOMETRY
#define FIELD_TYPE_BIT MYSQL_TYPE_BIT
/** @}*/
/**
@addtogroup group_cs_shutdown_kill_constants Shutdown/kill enums and constants
@ingroup group_cs
@sa THD::is_killable
@{
*/
#define MYSQL_SHUTDOWN_KILLABLE_CONNECT (unsigned char)(1 << 0)
#define MYSQL_SHUTDOWN_KILLABLE_TRANS (unsigned char)(1 << 1)
#define MYSQL_SHUTDOWN_KILLABLE_LOCK_TABLE (unsigned char)(1 << 2)
#define MYSQL_SHUTDOWN_KILLABLE_UPDATE (unsigned char)(1 << 3)
/**
We want levels to be in growing order of hardness (because we use number
comparisons).
@note ::SHUTDOWN_DEFAULT does not respect the growing property, but it's ok.
*/
enum mysql_enum_shutdown_level {
SHUTDOWN_DEFAULT = 0,
/** Wait for existing connections to finish */
SHUTDOWN_WAIT_CONNECTIONS = MYSQL_SHUTDOWN_KILLABLE_CONNECT,
/** Wait for existing transactons to finish */
SHUTDOWN_WAIT_TRANSACTIONS = MYSQL_SHUTDOWN_KILLABLE_TRANS,
/** Wait for existing updates to finish (=> no partial MyISAM update) */
SHUTDOWN_WAIT_UPDATES = MYSQL_SHUTDOWN_KILLABLE_UPDATE,
/** Flush InnoDB buffers and other storage engines' buffers*/
SHUTDOWN_WAIT_ALL_BUFFERS = (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1),
/** Don't flush InnoDB buffers, flush other storage engines' buffers*/
SHUTDOWN_WAIT_CRITICAL_BUFFERS = (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1) + 1,
/** Query level of the KILL command */
KILL_QUERY = 254,
/** Connection level of the KILL command */
KILL_CONNECTION = 255
};
/** @}*/
enum enum_resultset_metadata {
/** No metadata will be sent. */
RESULTSET_METADATA_NONE = 0,
/** The server will send all metadata. */
RESULTSET_METADATA_FULL = 1
};
#if defined(__clang__)
// disable -Wdocumentation to workaround
// https://bugs.llvm.org/show_bug.cgi?id=38905
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#endif
/**
The flags used in COM_STMT_EXECUTE.
@sa @ref Protocol_classic::parse_packet, @ref mysql_int_serialize_param_data
*/
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
enum enum_cursor_type {
CURSOR_TYPE_NO_CURSOR = 0,
CURSOR_TYPE_READ_ONLY = 1,
CURSOR_TYPE_FOR_UPDATE = 2,
CURSOR_TYPE_SCROLLABLE = 4,
/**
On when the client will send the parameter count
even for 0 parameters.
*/
PARAMETER_COUNT_AVAILABLE = 8
};
/** options for ::mysql_options() */
enum enum_mysql_set_option {
MYSQL_OPTION_MULTI_STATEMENTS_ON,
MYSQL_OPTION_MULTI_STATEMENTS_OFF
};
/**
Type of state change information that the server can include in the Ok
packet.
@note
- session_state_type shouldn't go past 255 (i.e. 1-byte boundary).
- Modify the definition of ::SESSION_TRACK_END when a new member is added.
*/
enum enum_session_state_type {
SESSION_TRACK_SYSTEM_VARIABLES, /**< Session system variables */
SESSION_TRACK_SCHEMA, /**< Current schema */
SESSION_TRACK_STATE_CHANGE, /**< track session state changes */
SESSION_TRACK_GTIDS, /**< See also: session_track_gtids */
SESSION_TRACK_TRANSACTION_CHARACTERISTICS, /**< Transaction chistics */
SESSION_TRACK_TRANSACTION_STATE /**< Transaction state */
};
/** start of ::enum_session_state_type */
#define SESSION_TRACK_BEGIN SESSION_TRACK_SYSTEM_VARIABLES
/** End of ::enum_session_state_type */
#define SESSION_TRACK_END SESSION_TRACK_TRANSACTION_STATE
/** is T a valid session state type */
#define IS_SESSION_STATE_TYPE(T) \
(((int)(T) >= SESSION_TRACK_BEGIN) && ((T) <= SESSION_TRACK_END))
#define net_new_transaction(net) ((net)->pkt_nr = 0)
bool my_net_init(struct NET *net, MYSQL_VIO vio);
void my_net_local_init(struct NET *net);
void net_end(struct NET *net);
void net_clear(struct NET *net, bool check_buffer);
void net_claim_memory_ownership(struct NET *net, bool claim);
bool net_realloc(struct NET *net, size_t length);
bool net_flush(struct NET *net);
bool my_net_write(struct NET *net, const unsigned char *packet, size_t len);
bool net_write_command(struct NET *net, unsigned char command,
const unsigned char *header, size_t head_len,
const unsigned char *packet, size_t len);
bool net_write_packet(struct NET *net, const unsigned char *packet,
size_t length);
unsigned long my_net_read(struct NET *net);
void my_net_set_write_timeout(struct NET *net, unsigned int timeout);
void my_net_set_read_timeout(struct NET *net, unsigned int timeout);
void my_net_set_retry_count(struct NET *net, unsigned int retry_count);
struct rand_struct {
unsigned long seed1, seed2, max_value;
double max_value_dbl;
};
/* Include the types here so existing UDFs can keep compiling */
#include "mysql/udf_registration_types.h"
/**
@addtogroup group_cs_compresson_constants Constants when using compression
@ingroup group_cs
@{
*/
#define NET_HEADER_SIZE 4 /**< standard header size */
#define COMP_HEADER_SIZE 3 /**< compression header extra size */
/** @}*/
/* Prototypes to password functions */
/* used in both client and server */
char *octet2hex(char *to, const char *str, unsigned int len);
/* end of password.c */
bool generate_sha256_scramble(unsigned char *dst, size_t dst_size,
const char *src, size_t src_size, const char *rnd,
size_t rnd_size);
// extern "C" since it is an (undocumented) part of the libmysql ABI.
#ifdef __cplusplus
extern "C" {
#endif
char *get_tty_password(const char *opt_message);
#ifdef __cplusplus
}
#endif
const char *mysql_errno_to_sqlstate(unsigned int mysql_errno);
/* Some other useful functions */
bool my_thread_init(void);
void my_thread_end(void);
#ifdef STDCALL
unsigned long STDCALL net_field_length(unsigned char **packet);
unsigned long STDCALL net_field_length_checked(unsigned char **packet,
unsigned long max_length);
#endif
uint64_t net_field_length_ll(unsigned char **packet);
unsigned char *net_store_length(unsigned char *pkg, unsigned long long length);
unsigned int net_length_size(unsigned long long num);
unsigned int net_field_length_size(const unsigned char *pos);
uint64_t net_length_size_including_self(uint64_t length_without_self);
#define NULL_LENGTH ((unsigned long)~0) /**< For ::net_store_length() */
#define MYSQL_STMT_HEADER 4
#define MYSQL_LONG_DATA_HEADER 6
/* clang-format off */
/**
Describes the current state of Asynchronous connection phase state machine
@startuml
[*] --> CONNECT_STAGE_INVALID
[*] --> CONNECT_STAGE_NOT_STARTED
CONNECT_STAGE_NOT_STARTED --> CONNECT_STAGE_NET_BEGIN_CONNECT
CONNECT_STAGE_NOT_STARTED --> CONNECT_STAGE_COMPLETE
CONNECT_STAGE_NET_BEGIN_CONNECT --> CONNECT_STAGE_NET_WAIT_CONNECT
CONNECT_STAGE_NET_BEGIN_CONNECT --> CONNECT_STAGE_NET_COMPLETE_CONNECT
CONNECT_STAGE_NET_BEGIN_CONNECT --> STATE_MACHINE_FAILED
CONNECT_STAGE_NET_WAIT_CONNECT --> CONNECT_STAGE_NET_COMPLETE_CONNECT
CONNECT_STAGE_NET_WAIT_CONNECT --> STATE_MACHINE_FAILED
CONNECT_STAGE_NET_COMPLETE_CONNECT --> STATE_MACHINE_FAILED
CONNECT_STAGE_NET_COMPLETE_CONNECT --> CONNECT_STAGE_READ_GREETING
CONNECT_STAGE_READ_GREETING --> STATE_MACHINE_FAILED
CONNECT_STAGE_READ_GREETING --> CONNECT_STAGE_PARSE_HANDSHAKE
CONNECT_STAGE_PARSE_HANDSHAKE --> STATE_MACHINE_FAILED
CONNECT_STAGE_PARSE_HANDSHAKE --> CONNECT_STAGE_ESTABLISH_SSL
CONNECT_STAGE_ESTABLISH_SSL --> STATE_MACHINE_FAILED
CONNECT_STAGE_ESTABLISH_SSL --> CONNECT_STAGE_AUTHENTICATE
CONNECT_STAGE_AUTHENTICATE --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTHENTICATE --> CONNECT_STAGE_AUTH_BEGIN
CONNECT_STAGE_AUTH_BEGIN --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_BEGIN --> CONNECT_STAGE_AUTH_RUN_FIRST_AUTHENTICATE_USER
CONNECT_STAGE_AUTH_RUN_FIRST_AUTHENTICATE_USER --> CONNECT_STAGE_AUTH_HANDLE_FIRST_AUTHENTICATE_USER
CONNECT_STAGE_AUTH_HANDLE_FIRST_AUTHENTICATE_USER --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_HANDLE_FIRST_AUTHENTICATE_USER --> CONNECT_STAGE_AUTH_READ_CHANGE_USER_RESULT
CONNECT_STAGE_AUTH_READ_CHANGE_USER_RESULT --> CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST
CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST --> CONNECT_STAGE_AUTH_RUN_SECOND_AUTHENTICATE_USER
CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST --> CONNECT_STAGE_AUTH_INIT_MULTI_AUTH
CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST --> CONNECT_STAGE_AUTH_FINISH_AUTH
CONNECT_STAGE_AUTH_RUN_SECOND_AUTHENTICATE_USER --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_RUN_SECOND_AUTHENTICATE_USER --> CONNECT_STAGE_AUTH_HANDLE_SECOND_AUTHENTICATE_USER
CONNECT_STAGE_AUTH_HANDLE_SECOND_AUTHENTICATE_USER --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_HANDLE_SECOND_AUTHENTICATE_USER --> CONNECT_STAGE_AUTH_INIT_MULTI_AUTH
CONNECT_STAGE_AUTH_HANDLE_SECOND_AUTHENTICATE_USER --> CONNECT_STAGE_AUTH_FINISH_AUTH
CONNECT_STAGE_AUTH_INIT_MULTI_AUTH --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_INIT_MULTI_AUTH --> CONNECT_STAGE_AUTH_DO_MULTI_PLUGIN_AUTH
CONNECT_STAGE_AUTH_DO_MULTI_PLUGIN_AUTH --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_DO_MULTI_PLUGIN_AUTH --> CONNECT_STAGE_AUTH_HANDLE_MULTI_AUTH_RESPONSE
CONNECT_STAGE_AUTH_HANDLE_MULTI_AUTH_RESPONSE --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_HANDLE_MULTI_AUTH_RESPONSE --> CONNECT_STAGE_AUTH_INIT_MULTI_AUTH
CONNECT_STAGE_AUTH_HANDLE_MULTI_AUTH_RESPONSE --> CONNECT_STAGE_AUTH_FINISH_AUTH
CONNECT_STAGE_AUTH_FINISH_AUTH --> STATE_MACHINE_FAILED
CONNECT_STAGE_AUTH_FINISH_AUTH --> CONNECT_STAGE_PREP_SELECT_DATABASE
CONNECT_STAGE_PREP_SELECT_DATABASE --> CONNECT_STAGE_COMPLETE
CONNECT_STAGE_PREP_SELECT_DATABASE --> CONNECT_STAGE_PREP_INIT_COMMANDS
CONNECT_STAGE_PREP_INIT_COMMANDS --> CONNECT_STAGE_COMPLETE
CONNECT_STAGE_PREP_INIT_COMMANDS --> CONNECT_STAGE_SEND_ONE_INIT_COMMAND
CONNECT_STAGE_SEND_ONE_INIT_COMMAND --> CONNECT_STAGE_SEND_ONE_INIT_COMMAND
CONNECT_STAGE_SEND_ONE_INIT_COMMAND --> STATE_MACHINE_FAILED
CONNECT_STAGE_SEND_ONE_INIT_COMMAND --> CONNECT_STAGE_COMPLETE
STATE_MACHINE_FAILED --> [*]
CONNECT_STAGE_COMPLETE --> [*]
CONNECT_STAGE_INVALID --> [*]
@enduml
*/
/* clang-format on */
enum connect_stage {
/** MYSQL not valid or an unknown state */
CONNECT_STAGE_INVALID = 0,
/** not connected */
CONNECT_STAGE_NOT_STARTED,
/** begin connection to the server */
CONNECT_STAGE_NET_BEGIN_CONNECT,
/** wait for connection to be established */
CONNECT_STAGE_NET_WAIT_CONNECT,
/** init the local data structures post connect */
CONNECT_STAGE_NET_COMPLETE_CONNECT,
/** read the first packet */
CONNECT_STAGE_READ_GREETING,
/** parse the first packet */
CONNECT_STAGE_PARSE_HANDSHAKE,
/** tls establishment */
CONNECT_STAGE_ESTABLISH_SSL,
/** authentication phase */
CONNECT_STAGE_AUTHENTICATE,
/** determine the plugin to use */
CONNECT_STAGE_AUTH_BEGIN,
/** run first auth plugin */
CONNECT_STAGE_AUTH_RUN_FIRST_AUTHENTICATE_USER,
/** handle the result of the first auth plugin run */
CONNECT_STAGE_AUTH_HANDLE_FIRST_AUTHENTICATE_USER,
/** read the implied changed user auth, if any */
CONNECT_STAGE_AUTH_READ_CHANGE_USER_RESULT,
/** Check if server asked to use a different authentication plugin */
CONNECT_STAGE_AUTH_HANDLE_CHANGE_USER_REQUEST,
/** Start the authentication process again with the plugin which
server asked for */
CONNECT_STAGE_AUTH_RUN_SECOND_AUTHENTICATE_USER,
/** Start multi factor authentication */
CONNECT_STAGE_AUTH_INIT_MULTI_AUTH,
/** Final cleanup */
CONNECT_STAGE_AUTH_FINISH_AUTH,
/** Now read the results of the second plugin run */
CONNECT_STAGE_AUTH_HANDLE_SECOND_AUTHENTICATE_USER,
/** Invoke client plugins multi-auth authentication method */
CONNECT_STAGE_AUTH_DO_MULTI_PLUGIN_AUTH,
/** Handle response from client plugins authentication method */
CONNECT_STAGE_AUTH_HANDLE_MULTI_AUTH_RESPONSE,
/** Authenticated, set initial database if specified */
CONNECT_STAGE_PREP_SELECT_DATABASE,
/** Prepare to send a sequence of init commands. */
CONNECT_STAGE_PREP_INIT_COMMANDS,
/** Send an init command. This is called once per init command until
they've all been run (or a failure occurs) */
CONNECT_STAGE_SEND_ONE_INIT_COMMAND,
/** Connected or no async connect in progress */
CONNECT_STAGE_COMPLETE
};
#endif | c | github | https://github.com/mysql/mysql-server | include/mysql_com.h |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package ssh
import (
"context"
"fmt"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
// Structure to hold roles that are allowed to accept any IP address.
type zeroAddressRoles struct {
Roles []string `json:"roles" mapstructure:"roles"`
}
func pathConfigZeroAddress(b *backend) *framework.Path {
return &framework.Path{
Pattern: "config/zeroaddress",
DisplayAttrs: &framework.DisplayAttributes{
OperationPrefix: operationPrefixSSH,
},
Fields: map[string]*framework.FieldSchema{
"roles": {
Type: framework.TypeCommaStringSlice,
Description: `[Required] Comma separated list of role names which
allows credentials to be requested for any IP address. CIDR blocks
previously registered under these roles will be ignored.`,
},
},
Operations: map[logical.Operation]framework.OperationHandler{
logical.UpdateOperation: &framework.PathOperation{
Callback: b.pathConfigZeroAddressWrite,
DisplayAttrs: &framework.DisplayAttributes{
OperationVerb: "configure",
OperationSuffix: "zero-address",
},
},
logical.ReadOperation: &framework.PathOperation{
Callback: b.pathConfigZeroAddressRead,
DisplayAttrs: &framework.DisplayAttributes{
OperationSuffix: "zero-address-configuration",
},
},
logical.DeleteOperation: &framework.PathOperation{
Callback: b.pathConfigZeroAddressDelete,
DisplayAttrs: &framework.DisplayAttributes{
OperationSuffix: "zero-address-configuration",
},
},
},
HelpSynopsis: pathConfigZeroAddressSyn,
HelpDescription: pathConfigZeroAddressDesc,
}
}
func (b *backend) pathConfigZeroAddressDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete(ctx, "config/zeroaddress")
if err != nil {
return nil, err
}
b.Backend.TryRecordObservationWithRequest(ctx, req, ObservationTypeSSHConfigZeroAddressDelete, nil)
return nil, nil
}
func (b *backend) pathConfigZeroAddressRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entry, err := b.getZeroAddressRoles(ctx, req.Storage)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
b.Backend.TryRecordObservationWithRequest(ctx, req, ObservationTypeSSHConfigZeroAddressRead, map[string]interface{}{
"role_names": entry.Roles,
})
return &logical.Response{
Data: map[string]interface{}{
"roles": entry.Roles,
},
}, nil
}
func (b *backend) pathConfigZeroAddressWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
roles := d.Get("roles").([]string)
if len(roles) == 0 {
return logical.ErrorResponse("Missing roles"), nil
}
// Check if the roles listed actually exist in the backend
for _, item := range roles {
role, err := b.getRole(ctx, req.Storage, item)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("Role %q does not exist", item)), nil
}
}
err := b.putZeroAddressRoles(ctx, req.Storage, roles)
if err != nil {
return nil, err
}
b.Backend.TryRecordObservationWithRequest(ctx, req, ObservationTypeSSHConfigZeroAddressWrite, map[string]interface{}{
"role_names": roles,
})
return nil, nil
}
// Stores the given list of roles at zeroaddress endpoint
func (b *backend) putZeroAddressRoles(ctx context.Context, s logical.Storage, roles []string) error {
entry, err := logical.StorageEntryJSON("config/zeroaddress", &zeroAddressRoles{
Roles: roles,
})
if err != nil {
return err
}
if err := s.Put(ctx, entry); err != nil {
return err
}
return nil
}
// Retrieves the list of roles from the zeroaddress endpoint.
func (b *backend) getZeroAddressRoles(ctx context.Context, s logical.Storage) (*zeroAddressRoles, error) {
entry, err := s.Get(ctx, "config/zeroaddress")
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result zeroAddressRoles
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
// Removes a role from the list of roles present in config/zeroaddress path
func (b *backend) removeZeroAddressRole(ctx context.Context, s logical.Storage, roleName string) error {
zeroAddressEntry, err := b.getZeroAddressRoles(ctx, s)
if err != nil {
return err
}
if zeroAddressEntry == nil {
return nil
}
zeroAddressEntry.Roles = strutil.StrListDelete(zeroAddressEntry.Roles, roleName)
return b.putZeroAddressRoles(ctx, s, zeroAddressEntry.Roles)
}
const pathConfigZeroAddressSyn = `
Assign zero address as default CIDR block for select roles.
`
const pathConfigZeroAddressDesc = `
Administrator can choose to make a select few registered roles to accept any IP
address, overriding the CIDR blocks registered during creation of roles. This
doesn't mean that the credentials are created for any IP address. Clients who
have access to these roles are trusted to make valid requests. Access to these
roles should be controlled using Vault policies. It is recommended that all the
roles that are allowed to accept any IP address should have an explicit policy
of deny for unintended clients.
This is a root authenticated endpoint. If backend is mounted at 'ssh' then use
the endpoint 'ssh/config/zeroaddress' to provide the list of allowed roles.
After mounting the backend, use 'path-help' for additional information.
` | go | github | https://github.com/hashicorp/vault | builtin/logical/ssh/path_config_zeroaddress.go |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.core import cache
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 4)
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
default_cache = cache.caches[cache.DEFAULT_CACHE_ALIAS]
second_cache = cache.caches['second']
default_cache.set('foo', 'bar')
second_cache.get('foo')
self.assertEqual(len(self.panel.calls), 2)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
not the process_response.
"""
cache.cache.get('café')
self.panel.process_response(self.request, self.response)
# ensure the panel does not have content yet.
self.assertNotIn('café', self.panel.content)
self.panel.generate_stats(self.request, self.response)
# ensure the panel renders correctly.
self.assertIn('café', self.panel.content) | unknown | codeparrot/codeparrot-clean | ||
use rustc_abi::{FieldIdx, VariantIdx};
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::*;
use rustc_middle::thir::*;
use rustc_middle::ty;
use rustc_middle::ty::cast::mir_cast_kind;
use rustc_span::Span;
use rustc_span::source_map::Spanned;
use super::{PResult, ParseCtxt, parse_by_kind};
use crate::builder::custom::ParseError;
use crate::builder::expr::as_constant::as_constant_inner;
impl<'a, 'tcx> ParseCtxt<'a, 'tcx> {
pub(crate) fn parse_statement(&self, expr_id: ExprId) -> PResult<StatementKind<'tcx>> {
parse_by_kind!(self, expr_id, _, "statement",
@call(mir_storage_live, args) => {
Ok(StatementKind::StorageLive(self.parse_local(args[0])?))
},
@call(mir_storage_dead, args) => {
Ok(StatementKind::StorageDead(self.parse_local(args[0])?))
},
@call(mir_assume, args) => {
let op = self.parse_operand(args[0])?;
Ok(StatementKind::Intrinsic(Box::new(NonDivergingIntrinsic::Assume(op))))
},
@call(mir_retag, args) => {
Ok(StatementKind::Retag(RetagKind::Default, Box::new(self.parse_place(args[0])?)))
},
@call(mir_set_discriminant, args) => {
let place = self.parse_place(args[0])?;
let var = self.parse_integer_literal(args[1])? as u32;
Ok(StatementKind::SetDiscriminant {
place: Box::new(place),
variant_index: VariantIdx::from_u32(var),
})
},
ExprKind::Assign { lhs, rhs } => {
let lhs = self.parse_place(*lhs)?;
let rhs = self.parse_rvalue(*rhs)?;
Ok(StatementKind::Assign(Box::new((lhs, rhs))))
},
)
}
pub(crate) fn parse_terminator(&self, expr_id: ExprId) -> PResult<TerminatorKind<'tcx>> {
parse_by_kind!(self, expr_id, expr, "terminator",
@call(mir_return, _args) => {
Ok(TerminatorKind::Return)
},
@call(mir_goto, args) => {
Ok(TerminatorKind::Goto { target: self.parse_block(args[0])? } )
},
@call(mir_unreachable, _args) => {
Ok(TerminatorKind::Unreachable)
},
@call(mir_unwind_resume, _args) => {
Ok(TerminatorKind::UnwindResume)
},
@call(mir_unwind_terminate, args) => {
Ok(TerminatorKind::UnwindTerminate(self.parse_unwind_terminate_reason(args[0])?))
},
@call(mir_drop, args) => {
Ok(TerminatorKind::Drop {
place: self.parse_place(args[0])?,
target: self.parse_return_to(args[1])?,
unwind: self.parse_unwind_action(args[2])?,
replace: false,
drop: None,
async_fut: None,
})
},
@call(mir_call, args) => {
self.parse_call(args)
},
@call(mir_tail_call, args) => {
self.parse_tail_call(args)
},
ExprKind::Match { scrutinee, arms, .. } => {
let discr = self.parse_operand(*scrutinee)?;
self.parse_match(arms, expr.span).map(|t| TerminatorKind::SwitchInt { discr, targets: t })
},
)
}
fn parse_unwind_terminate_reason(&self, expr_id: ExprId) -> PResult<UnwindTerminateReason> {
parse_by_kind!(self, expr_id, _, "unwind terminate reason",
@variant(mir_unwind_terminate_reason, Abi) => {
Ok(UnwindTerminateReason::Abi)
},
@variant(mir_unwind_terminate_reason, InCleanup) => {
Ok(UnwindTerminateReason::InCleanup)
},
)
}
fn parse_unwind_action(&self, expr_id: ExprId) -> PResult<UnwindAction> {
parse_by_kind!(self, expr_id, _, "unwind action",
@call(mir_unwind_continue, _args) => {
Ok(UnwindAction::Continue)
},
@call(mir_unwind_unreachable, _args) => {
Ok(UnwindAction::Unreachable)
},
@call(mir_unwind_terminate, args) => {
Ok(UnwindAction::Terminate(self.parse_unwind_terminate_reason(args[0])?))
},
@call(mir_unwind_cleanup, args) => {
Ok(UnwindAction::Cleanup(self.parse_block(args[0])?))
},
)
}
fn parse_return_to(&self, expr_id: ExprId) -> PResult<BasicBlock> {
parse_by_kind!(self, expr_id, _, "return block",
@call(mir_return_to, args) => {
self.parse_block(args[0])
},
)
}
fn parse_match(&self, arms: &[ArmId], span: Span) -> PResult<SwitchTargets> {
let Some((otherwise, rest)) = arms.split_last() else {
return Err(ParseError {
span,
item_description: "no arms".to_string(),
expected: "at least one arm".to_string(),
});
};
let otherwise = &self.thir[*otherwise];
let PatKind::Wild = otherwise.pattern.kind else {
return Err(ParseError {
span: otherwise.span,
item_description: format!("{:?}", otherwise.pattern.kind),
expected: "wildcard pattern".to_string(),
});
};
let otherwise = self.parse_block(otherwise.body)?;
let mut values = Vec::new();
let mut targets = Vec::new();
for arm in rest {
let arm = &self.thir[*arm];
let value = match arm.pattern.kind {
PatKind::Constant { value } => value,
_ => {
return Err(ParseError {
span: arm.pattern.span,
item_description: format!("{:?}", arm.pattern.kind),
expected: "constant pattern".to_string(),
});
}
};
values.push(value.to_leaf().to_bits_unchecked());
targets.push(self.parse_block(arm.body)?);
}
Ok(SwitchTargets::new(values.into_iter().zip(targets), otherwise))
}
fn parse_call(&self, args: &[ExprId]) -> PResult<TerminatorKind<'tcx>> {
let (destination, call) = parse_by_kind!(self, args[0], _, "function call",
ExprKind::Assign { lhs, rhs } => (*lhs, *rhs),
);
let destination = self.parse_place(destination)?;
let target = self.parse_return_to(args[1])?;
let unwind = self.parse_unwind_action(args[2])?;
parse_by_kind!(self, call, _, "function call",
ExprKind::Call { fun, args, from_hir_call, fn_span, .. } => {
let fun = self.parse_operand(*fun)?;
let args = args
.iter()
.map(|arg|
Ok(Spanned { node: self.parse_operand(*arg)?, span: self.thir.exprs[*arg].span } )
)
.collect::<PResult<Box<[_]>>>()?;
Ok(TerminatorKind::Call {
func: fun,
args,
destination,
target: Some(target),
unwind,
call_source: if *from_hir_call { CallSource::Normal } else {
CallSource::OverloadedOperator
},
fn_span: *fn_span,
})
},
)
}
fn parse_tail_call(&self, args: &[ExprId]) -> PResult<TerminatorKind<'tcx>> {
parse_by_kind!(self, args[0], _, "tail call",
ExprKind::Call { fun, args, fn_span, .. } => {
let fun = self.parse_operand(*fun)?;
let args = args
.iter()
.map(|arg|
Ok(Spanned { node: self.parse_operand(*arg)?, span: self.thir.exprs[*arg].span } )
)
.collect::<PResult<Box<[_]>>>()?;
Ok(TerminatorKind::TailCall {
func: fun,
args,
fn_span: *fn_span,
})
},
)
}
fn parse_rvalue(&self, expr_id: ExprId) -> PResult<Rvalue<'tcx>> {
parse_by_kind!(self, expr_id, expr, "rvalue",
@call(mir_discriminant, args) => self.parse_place(args[0]).map(Rvalue::Discriminant),
@call(mir_cast_transmute, args) => {
let source = self.parse_operand(args[0])?;
Ok(Rvalue::Cast(CastKind::Transmute, source, expr.ty))
},
@call(mir_cast_ptr_to_ptr, args) => {
let source = self.parse_operand(args[0])?;
Ok(Rvalue::Cast(CastKind::PtrToPtr, source, expr.ty))
},
@call(mir_cast_unsize, args) => {
let source = self.parse_operand(args[0])?;
let kind = CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, CoercionSource::AsCast);
Ok(Rvalue::Cast(kind, source, expr.ty))
},
@call(mir_checked, args) => {
parse_by_kind!(self, args[0], _, "binary op",
ExprKind::Binary { op, lhs, rhs } => {
if let Some(op_with_overflow) = op.wrapping_to_overflowing() {
Ok(Rvalue::BinaryOp(
op_with_overflow, Box::new((self.parse_operand(*lhs)?, self.parse_operand(*rhs)?))
))
} else {
Err(self.expr_error(expr_id, "No WithOverflow form of this operator"))
}
},
)
},
@call(mir_offset, args) => {
let ptr = self.parse_operand(args[0])?;
let offset = self.parse_operand(args[1])?;
Ok(Rvalue::BinaryOp(BinOp::Offset, Box::new((ptr, offset))))
},
@call(mir_ptr_metadata, args) => Ok(Rvalue::UnaryOp(UnOp::PtrMetadata, self.parse_operand(args[0])?)),
ExprKind::Borrow { borrow_kind, arg } => Ok(
Rvalue::Ref(self.tcx.lifetimes.re_erased, *borrow_kind, self.parse_place(*arg)?)
),
ExprKind::RawBorrow { mutability, arg } => Ok(
Rvalue::RawPtr((*mutability).into(), self.parse_place(*arg)?)
),
ExprKind::Binary { op, lhs, rhs } => Ok(
Rvalue::BinaryOp(*op, Box::new((self.parse_operand(*lhs)?, self.parse_operand(*rhs)?)))
),
ExprKind::Unary { op, arg } => Ok(
Rvalue::UnaryOp(*op, self.parse_operand(*arg)?)
),
ExprKind::Repeat { value, count } => Ok(
Rvalue::Repeat(self.parse_operand(*value)?, *count)
),
ExprKind::Cast { source } => {
let source = self.parse_operand(*source)?;
let source_ty = source.ty(self.body.local_decls(), self.tcx);
let cast_kind = mir_cast_kind(source_ty, expr.ty);
Ok(Rvalue::Cast(cast_kind, source, expr.ty))
},
ExprKind::Tuple { fields } => Ok(
Rvalue::Aggregate(
Box::new(AggregateKind::Tuple),
fields.iter().map(|e| self.parse_operand(*e)).collect::<Result<_, _>>()?
)
),
ExprKind::Array { fields } => {
let elem_ty = expr.ty.builtin_index().expect("ty must be an array");
Ok(Rvalue::Aggregate(
Box::new(AggregateKind::Array(elem_ty)),
fields.iter().map(|e| self.parse_operand(*e)).collect::<Result<_, _>>()?
))
},
ExprKind::Adt(box AdtExpr { adt_def, variant_index, args, fields, .. }) => {
let is_union = adt_def.is_union();
let active_field_index = is_union.then(|| fields[0].name);
Ok(Rvalue::Aggregate(
Box::new(AggregateKind::Adt(adt_def.did(), *variant_index, args, None, active_field_index)),
fields.iter().map(|f| self.parse_operand(f.expr)).collect::<Result<_, _>>()?
))
},
_ => self.parse_operand(expr_id).map(Rvalue::Use),
)
}
    /// Parse an expression into a MIR `Operand`.
    ///
    /// `mir_move` / `mir_static` / `mir_static_mut` helper calls are handled
    /// explicitly; literal-like expressions lower to constants, and anything
    /// else falls back to a by-copy place.
    pub(crate) fn parse_operand(&self, expr_id: ExprId) -> PResult<Operand<'tcx>> {
        parse_by_kind!(self, expr_id, expr, "operand",
            @call(mir_move, args) => self.parse_place(args[0]).map(Operand::Move),
            @call(mir_static, args) => self.parse_static(args[0]),
            @call(mir_static_mut, args) => self.parse_static(args[0]),
            ExprKind::Literal { .. }
            | ExprKind::NamedConst { .. }
            | ExprKind::NonHirLiteral { .. }
            | ExprKind::ZstLiteral { .. }
            | ExprKind::ConstParam { .. }
            | ExprKind::ConstBlock { .. } => {
                // Literal-like nodes become constant operands directly.
                Ok(Operand::Constant(Box::new(
                    as_constant_inner(expr, |_| None, self.tcx)
                )))
            },
            _ => self.parse_place(expr_id).map(Operand::Copy),
        )
    }
    /// Parse an expression into a `Place`, discarding the type information
    /// that `parse_place_inner` tracks for further projections.
    fn parse_place(&self, expr_id: ExprId) -> PResult<Place<'tcx>> {
        self.parse_place_inner(expr_id).map(|(x, _)| x)
    }
    /// Parse an expression into a `Place` together with the `PlaceTy` needed
    /// to keep projecting (fields, downcasts, derefs, indexing).
    fn parse_place_inner(&self, expr_id: ExprId) -> PResult<(Place<'tcx>, PlaceTy<'tcx>)> {
        let (parent, proj) = parse_by_kind!(self, expr_id, expr, "place",
            @call(mir_field, args) => {
                // Field projections need the parent's type to compute the
                // field type, so this arm returns early instead of falling
                // through to the shared projection code below.
                let (parent, place_ty) = self.parse_place_inner(args[0])?;
                let field = FieldIdx::from_u32(self.parse_integer_literal(args[1])? as u32);
                let field_ty = PlaceTy::field_ty(self.tcx, place_ty.ty, place_ty.variant_index, field);
                let proj = PlaceElem::Field(field, field_ty);
                let place = parent.project_deeper(&[proj], self.tcx);
                return Ok((place, PlaceTy::from_ty(field_ty)));
            },
            @call(mir_variant, args) => {
                (args[0], PlaceElem::Downcast(
                    None,
                    VariantIdx::from_u32(self.parse_integer_literal(args[1])? as u32)
                ))
            },
            ExprKind::Deref { arg } => {
                parse_by_kind!(self, *arg, _, "does not matter",
                    // `*mir_make_place(x)` is an identity wrapper around `x`.
                    @call(mir_make_place, args) => return self.parse_place_inner(args[0]),
                    _ => (*arg, PlaceElem::Deref),
                )
            },
            ExprKind::Index { lhs, index } => (*lhs, PlaceElem::Index(self.parse_local(*index)?)),
            ExprKind::Field { lhs, name: field, .. } => (*lhs, PlaceElem::Field(*field, expr.ty)),
            _ => {
                // Base case: a bare local variable.
                let place = self.parse_local(expr_id).map(Place::from)?;
                return Ok((place, PlaceTy::from_ty(expr.ty)))
            },
        );
        // Shared tail: recurse on the parent, then apply the single
        // projection element chosen above and update the tracked type.
        let (parent, ty) = self.parse_place_inner(parent)?;
        let place = parent.project_deeper(&[proj], self.tcx);
        let ty = ty.projection_ty(self.tcx, proj);
        Ok((place, ty))
    }
    /// Resolve a variable reference to the MIR `Local` it was mapped to.
    fn parse_local(&self, expr_id: ExprId) -> PResult<Local> {
        parse_by_kind!(self, expr_id, _, "local",
            ExprKind::VarRef { id } => Ok(self.local_map[id]),
        )
    }
    /// Resolve a variable reference to the `BasicBlock` it was mapped to.
    fn parse_block(&self, expr_id: ExprId) -> PResult<BasicBlock> {
        parse_by_kind!(self, expr_id, _, "basic block",
            ExprKind::VarRef { id } => Ok(self.block_map[id]),
        )
    }
    /// Parse a reference to a static into a constant pointer operand.
    ///
    /// The expression arrives as `*&STATIC`, so the outer deref is unwrapped
    /// first, and the remaining `StaticRef` is turned into a scalar pointer
    /// constant pointing at the static's allocation.
    fn parse_static(&self, expr_id: ExprId) -> PResult<Operand<'tcx>> {
        let expr_id = parse_by_kind!(self, expr_id, _, "static",
            ExprKind::Deref { arg } => *arg,
        );
        parse_by_kind!(self, expr_id, expr, "static",
            ExprKind::StaticRef { alloc_id, ty, .. } => {
                let const_val =
                    ConstValue::Scalar(Scalar::from_pointer((*alloc_id).into(), &self.tcx));
                let const_ = Const::Val(const_val, *ty);
                Ok(Operand::Constant(Box::new(ConstOperand {
                    span: expr.span,
                    user_ty: None,
                    const_
                })))
            },
        )
    }
    /// Evaluate a literal-like expression to its integer value
    /// (used for field and variant indices).
    fn parse_integer_literal(&self, expr_id: ExprId) -> PResult<u128> {
        parse_by_kind!(self, expr_id, expr, "constant",
            ExprKind::Literal { .. }
            | ExprKind::NamedConst { .. }
            | ExprKind::NonHirLiteral { .. }
            | ExprKind::ConstBlock { .. } => Ok({
                let value = as_constant_inner(expr, |_| None, self.tcx);
                value.const_.eval_bits(self.tcx, self.typing_env)
            }),
        )
    }
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_mir_build/src/builder/custom/parse/instruction.rs |
# pylint:disable=consider-using-with
from typing import List, Dict
import json
import subprocess
import argparse
import tempfile
import os
import itertools
from collections import defaultdict
import angr
UNIQUE_STRING_COUNT = 20
# strings longer than MAX_UNIQUE_STRING_LEN will be truncated
MAX_UNIQUE_STRING_LEN = 70
def get_basic_info(ar_path: str) -> Dict[str, str]:
    """
    Get basic information (architecture and platform) of the archive file.

    :param ar_path: Path to the static library (.a) archive.
    :return:        A dict with "arch" and "platform" keys.
    :raise ValueError: If the archive contains no object files.
    """
    with tempfile.TemporaryDirectory() as tempdirname:
        cwd = os.getcwd()
        os.chdir(tempdirname)
        try:
            # Extract all archive members into the temporary directory.
            subprocess.call(["ar", "x", ar_path])
            # Load arch and OS information from the first .o file
            o_files = [f for f in os.listdir(".") if f.endswith(".o")]
            if not o_files:
                # BUG FIX: previously fell through with arch_name/os_name
                # unbound, raising NameError below.
                raise ValueError(f"No object files found in archive {ar_path}.")
            proj = angr.Project(o_files[0], auto_load_libs=False)
            arch_name = proj.arch.name.lower()
            os_name = proj.simos.name.lower()
        finally:
            # Always restore the working directory, even on failure.
            os.chdir(cwd)
    return {
        'arch': arch_name,
        'platform': os_name,
    }
def get_unique_strings(ar_path: str) -> List[str]:
    """
    Extract up to UNIQUE_STRING_COUNT long strings from the archive that do
    not collide with (or contain) any of its symbols, truncated to
    MAX_UNIQUE_STRING_LEN characters.

    For Linux libraries, this method requires ar (from binutils),
    nm (from binutils), and strings.
    """
    # Collect symbol names via nm so we can exclude them from the strings.
    nm_output = subprocess.check_output(["nm", ar_path])
    nm_lines = nm_output.decode("utf-8").split("\n")
    symbols = set()
    for nm_line in nm_lines:
        symbol_types = "UuVvTtRrDdWwBbNn"
        for symbol_type in symbol_types:
            if f" {symbol_type} " in nm_line:
                # parse the symbol name after the type column
                symbol = nm_line[nm_line.find(f" {symbol_type}") + 3:].strip(" ")
                if "." in symbol:
                    symbols |= set(symbol.split("."))
                else:
                    symbols.add(symbol)
                break
    # Extract the archive into a temporary directory and gather strings from
    # every object file.
    all_strings = set()
    with tempfile.TemporaryDirectory() as tempdirname:
        cwd = os.getcwd()
        os.chdir(tempdirname)
        try:
            subprocess.call(["ar", "x", ar_path])
            for filename in os.listdir("."):
                if not filename.endswith(".o"):
                    continue
                strings = subprocess.check_output(["strings", "-n", "8", filename])
                strings = strings.decode("utf-8").split("\n")
                non_symbol_strings = set()
                for s in strings:
                    if s in symbols:
                        continue
                    if "." in s and any(subs in symbols for subs in s.split(".")):
                        continue
                    # C++ specific
                    if "::" in s:
                        continue
                    if "_" in s:
                        # make sure it's not a substring of any symbol
                        if any(s in symbol for symbol in symbols):
                            continue
                    non_symbol_strings.add(s)
                all_strings |= non_symbol_strings
        finally:
            # BUG FIX: restore cwd even if a subprocess call raises.
            os.chdir(cwd)
    # Prefer the longest strings, de-duplicating by their first five
    # characters so we don't pick many near-identical strings.
    sorted_strings = sorted(all_strings, key=len, reverse=True)
    picked = set()
    unique_strings = []
    for s in sorted_strings:
        if s[:5] in picked:
            continue
        unique_strings.append(s[:MAX_UNIQUE_STRING_LEN])
        picked.add(s[:5])
        if len(unique_strings) >= UNIQUE_STRING_COUNT:
            break
    return unique_strings
def run_pelf(pelf_path: str, ar_path: str, output_path: str):
    """Run IDA's pelf on the archive to produce a .pat pattern file."""
    cmd = [pelf_path, ar_path, output_path]
    subprocess.check_call(cmd)
def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str):
    """
    Run IDA's sigmake on a .pat file to produce a .sig signature file.

    :return: True on success, False if sigmake reported collisions (in which
             case a .exc file is produced next to the signature).
    """
    # BUG FIX: arguments are passed to sigmake directly (no shell), so names
    # with spaces must NOT be wrapped in literal quote characters — the quotes
    # would become part of the signature name.
    sig_name_arg = f"-n{sig_name}"
    proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            )
    _, stderr = proc.communicate()
    return b"COLLISIONS:" not in stderr
def process_exc_file(exc_path: str):
    """
    Resolve a sigmake collision (.exc) file in place.

    We are doing the stupidest thing possible: For each batch of conflicts, we
    pick the most likely result based on a set of predefined rules (prefer the
    shortest non-.cold name) and mark it with a "+" prefix, which tells
    sigmake which entry to keep.

    TODO: Add caller-callee-based de-duplication.
    """
    with open(exc_path, "r") as f:
        data = f.read()
    lines = data.split("\n")
    # Parse groups: groups are separated by blank lines; comment lines
    # (starting with ";") are dropped.
    ctr = itertools.count()
    idx = 0
    groups = defaultdict(dict)
    for line in lines:
        if line.startswith(";"):
            continue
        if not line:
            idx = next(ctr)
        else:
            # the function name is everything before the first tab
            func_name = line[:line.index("\t")].strip(" ")
            groups[idx][func_name] = line
    # For each group, decide the one entry to keep.
    for g in groups.values():
        if len(g) == 1:
            # don't pick anything. This is a weird case that I don't understand
            continue
        if all(func_name.endswith(".cold") for func_name in g):
            # .cold functions. doesn't matter what we pick
            continue
        non_cold_names = [name for name in g if not name.endswith(".cold")]
        # Pick the shortest non-.cold name; ties go to insertion order, same
        # as the original stable sort.
        the_chosen_one = min(non_cold_names, key=len)
        g[the_chosen_one] = "+" + g[the_chosen_one]
    # Write the groups back, one blank line after each group.
    with open(exc_path, "w") as f:
        for g in groups.values():
            for line in g.values():
                f.write(line + "\n")
            f.write("\n")
def main():
    """Command-line entry point: build a FLIRT signature for a .a archive."""
    parser = argparse.ArgumentParser()
    parser.add_argument("ar_path", help="Path of the .a file to build signatures for")
    parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)")
    parser.add_argument("sig_path", help="File name of the generated signature")
    parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta "
                                           "data file.")
    parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta "
                                                   "data file.")
    # parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in
    # the meta data file.")
    parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the "
                                     "meta data file.")
    parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). It will be stored in the "
                                             "meta data file.")
    parser.add_argument("--pelf_path", help="Path of pelf")
    parser.add_argument("--sigmake_path", help="Path of sigmake")
    args = parser.parse_args()

    # Resolve tool paths from arguments or the environment.
    if args.pelf_path:
        pelf_path = args.pelf_path
    elif "pelf_path" in os.environ:
        pelf_path = os.environ['pelf_path']
    else:
        raise ValueError("pelf_path must be specified.")
    if args.sigmake_path:
        # BUG FIX: this previously read args.pelf_path.
        sigmake_path = args.sigmake_path
    elif "sigmake_path" in os.environ:
        sigmake_path = os.environ['sigmake_path']
    else:
        raise ValueError("sigmake_path must be specified.")

    # Normalize optional metadata fields to lowercase.
    compiler = args.compiler
    if compiler:
        compiler = compiler.lower()
    compiler_version = args.compiler_version
    if compiler_version:
        compiler_version = compiler_version.lower()
    os_name = args.os
    if os_name:
        os_name = os_name.lower()
    os_version = args.os_version
    if os_version:
        os_version = os_version.lower()

    # Get basic information (arch and platform) from the library
    basic_info = get_basic_info(args.ar_path)
    # Get unique strings from the library
    unique_strings = get_unique_strings(args.ar_path)

    # Build necessary file paths: the .exc and .meta files live next to the
    # signature and share its base name.
    sig_path_basename = os.path.basename(args.sig_path)
    if "." in sig_path_basename:
        sig_dir = os.path.dirname(args.sig_path)
        filename = sig_path_basename[:sig_path_basename.rfind(".")]
        exc_path = os.path.join(
            sig_dir,
            filename + ".exc"
        )
        meta_path = os.path.join(
            sig_dir,
            filename + ".meta"
        )
    else:
        exc_path = args.sig_path + ".exc"
        meta_path = args.sig_path + ".meta"
    if os.path.isfile(exc_path):
        # Remove existing exc files (if there is one)
        os.remove(exc_path)

    # Make a temporary directory for the intermediate .pat file
    with tempfile.TemporaryDirectory() as tmpdirname:
        ar_path = args.ar_path
        basename = os.path.basename(ar_path)
        # sanitize basename since otherwise sigmake is not happy with it
        if basename.endswith(".a"):
            basename = basename[:-2]
        basename = basename.replace("+", "plus")
        # sanitize signame as well
        sig_name = args.sig_name
        sig_name = sig_name.replace("+", "plus")
        pat_path = os.path.join(tmpdirname, basename + ".pat")
        run_pelf(pelf_path, ar_path, pat_path)
        has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
        if has_collision:
            process_exc_file(exc_path)
            # Run sigmake again with the collisions resolved.
            # BUG FIX: use the sanitized sig_name here too (was args.sig_name).
            has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
        assert not has_collision

    # Write the metadata JSON file next to the signature.
    with open(meta_path, "w") as f:
        metadata = {
            'unique_strings': unique_strings,
        }
        metadata.update(basic_info)
        if compiler_version:
            metadata['compiler_version'] = compiler_version
        if compiler:
            metadata['compiler'] = compiler
        if os_name:
            metadata['os'] = os_name
        if os_version:
            metadata['os_version'] = os_version
        f.write(json.dumps(metadata, indent=2))


if __name__ == "__main__":
    main()
//===--- JSONTransport.cpp - sending and receiving LSP messages over JSON -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Protocol.h" // For LSPError
#include "Transport.h"
#include "support/Cancellation.h"
#include "support/Logger.h"
#include "support/Shutdown.h"
#include "support/ThreadCrashReporter.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Error.h"
#include <optional>
#include <system_error>
namespace clang {
namespace clangd {
namespace {
// Converts an llvm::Error into the JSON-RPC "error" object shape
// ({"message": ..., "code": ...}) used in LSP responses.
llvm::json::Object encodeError(llvm::Error E) {
  std::string Message;
  ErrorCode Code = ErrorCode::UnknownErrorCode;
  // FIXME: encode cancellation errors using RequestCancelled or ContentModified
  // as appropriate.
  if (llvm::Error Unhandled = llvm::handleErrors(
          std::move(E),
          [&](const CancelledError &C) -> llvm::Error {
            switch (C.Reason) {
            case static_cast<int>(ErrorCode::ContentModified):
              Code = ErrorCode::ContentModified;
              Message = "Request cancelled because the document was modified";
              break;
            default:
              Code = ErrorCode::RequestCancelled;
              Message = "Request cancelled";
              break;
            }
            return llvm::Error::success();
          },
          [&](const LSPError &L) -> llvm::Error {
            Message = L.Message;
            Code = L.Code;
            return llvm::Error::success();
          }))
    // Any other error type keeps the UnknownErrorCode and uses its string.
    Message = llvm::toString(std::move(Unhandled));
  return llvm::json::Object{
      {"message", std::move(Message)},
      {"code", int64_t(Code)},
  };
}
// Parses a JSON-RPC error object back into an llvm::Error: an LSPError when a
// numeric "code" member is present, a generic error otherwise.
llvm::Error decodeError(const llvm::json::Object &O) {
  llvm::StringRef Msg = O.getString("message").value_or("Unspecified error");
  if (auto Code = O.getInteger("code"))
    return llvm::make_error<LSPError>(Msg.str(), ErrorCode(*Code));
  return error(Msg.str());
}
// Transport implementation speaking JSON-RPC 2.0 over stdio-style streams.
// Reads from a FILE* (optionally mirroring input to InMirror) and writes
// Content-Length-framed messages to Out.
class JSONTransport : public Transport {
public:
  JSONTransport(std::FILE *In, llvm::raw_ostream &Out,
                llvm::raw_ostream *InMirror, bool Pretty, JSONStreamStyle Style)
      : In(In), Out(Out), InMirror(InMirror ? *InMirror : llvm::nulls()),
        Pretty(Pretty), Style(Style) {}

  // Sends a JSON-RPC notification (no "id", so no response is expected).
  void notify(llvm::StringRef Method, llvm::json::Value Params) override {
    sendMessage(llvm::json::Object{
        {"jsonrpc", "2.0"},
        {"method", Method},
        {"params", std::move(Params)},
    });
  }
  // Sends a JSON-RPC request; the peer replies using the same ID.
  void call(llvm::StringRef Method, llvm::json::Value Params,
            llvm::json::Value ID) override {
    sendMessage(llvm::json::Object{
        {"jsonrpc", "2.0"},
        {"id", std::move(ID)},
        {"method", Method},
        {"params", std::move(Params)},
    });
  }
  // Sends a JSON-RPC response: "result" on success, "error" on failure.
  void reply(llvm::json::Value ID,
             llvm::Expected<llvm::json::Value> Result) override {
    if (Result) {
      sendMessage(llvm::json::Object{
          {"jsonrpc", "2.0"},
          {"id", std::move(ID)},
          {"result", std::move(*Result)},
      });
    } else {
      sendMessage(llvm::json::Object{
          {"jsonrpc", "2.0"},
          {"id", std::move(ID)},
          {"error", encodeError(Result.takeError())},
      });
    }
  }

  // Reads and dispatches messages until EOF, a fatal error, a shutdown
  // signal, or the "exit" notification (which yields success).
  llvm::Error loop(MessageHandler &Handler) override {
    std::string JSON; // Messages may be large, reuse same big buffer.
    while (!feof(In)) {
      if (shutdownRequested())
        return error(std::make_error_code(std::errc::operation_canceled),
                     "Got signal, shutting down");
      if (ferror(In))
        return llvm::errorCodeToError(llvm::errnoAsErrorCode());
      if (readRawMessage(JSON)) {
        // If a signal crashes us mid-message, dump the message being handled.
        ThreadCrashReporter ScopedReporter([&JSON]() {
          auto &OS = llvm::errs();
          OS << "Signalled while processing message:\n";
          OS << JSON << "\n";
        });
        if (auto Doc = llvm::json::parse(JSON)) {
          vlog(Pretty ? "<<< {0:2}\n" : "<<< {0}\n", *Doc);
          if (!handleMessage(std::move(*Doc), Handler))
            return llvm::Error::success(); // we saw the "exit" notification.
        } else {
          // Parse error. Log the raw message.
          vlog("<<< {0}\n", JSON);
          elog("JSON parse error: {0}", llvm::toString(Doc.takeError()));
        }
      }
    }
    return llvm::errorCodeToError(std::make_error_code(std::errc::io_error));
  }

private:
  // Dispatches incoming message to Handler onNotify/onCall/onReply.
  bool handleMessage(llvm::json::Value Message, MessageHandler &Handler);
  // Writes outgoing message to Out stream, framed with a Content-Length
  // header, and flushes so the peer sees it immediately.
  void sendMessage(llvm::json::Value Message) {
    OutputBuffer.clear();
    llvm::raw_svector_ostream OS(OutputBuffer);
    OS << llvm::formatv(Pretty ? "{0:2}" : "{0}", Message);
    Out << "Content-Length: " << OutputBuffer.size() << "\r\n\r\n"
        << OutputBuffer;
    Out.flush();
    vlog(">>> {0}\n", OutputBuffer);
  }
  // Read raw string messages from input stream, dispatching on framing style
  // (standard Content-Length headers vs. "---"-delimited lit-test syntax).
  bool readRawMessage(std::string &JSON) {
    return Style == JSONStreamStyle::Delimited ? readDelimitedMessage(JSON)
                                               : readStandardMessage(JSON);
  }
  bool readDelimitedMessage(std::string &JSON);
  bool readStandardMessage(std::string &JSON);

  // Reused scratch buffer for outgoing messages.
  llvm::SmallVector<char, 0> OutputBuffer;
  std::FILE *In;
  llvm::raw_ostream &Out;
  llvm::raw_ostream &InMirror;
  bool Pretty;
  JSONStreamStyle Style;
};
// Classifies a decoded JSON-RPC message as a response, call, or notification
// and forwards it to the matching MessageHandler hook. Returns the handler's
// verdict (false stops the transport loop); malformed messages return false.
bool JSONTransport::handleMessage(llvm::json::Value Message,
                                  MessageHandler &Handler) {
  // Message must be an object with "jsonrpc":"2.0".
  auto *Object = Message.getAsObject();
  if (!Object ||
      Object->getString("jsonrpc") != std::optional<llvm::StringRef>("2.0")) {
    elog("Not a JSON-RPC 2.0 message: {0:2}", Message);
    return false;
  }
  // ID may be any JSON value. If absent, this is a notification.
  std::optional<llvm::json::Value> ID;
  if (auto *I = Object->get("id"))
    ID = std::move(*I);
  auto Method = Object->getString("method");
  if (!Method) { // This is a response.
    if (!ID) {
      elog("No method and no response ID: {0:2}", Message);
      return false;
    }
    if (auto *Err = Object->getObject("error"))
      return Handler.onReply(std::move(*ID), decodeError(*Err));
    // Result should be given, use null if not.
    llvm::json::Value Result = nullptr;
    if (auto *R = Object->get("result"))
      Result = std::move(*R);
    return Handler.onReply(std::move(*ID), std::move(Result));
  }
  // Params should be given, use null if not.
  llvm::json::Value Params = nullptr;
  if (auto *P = Object->get("params"))
    Params = std::move(*P);
  // With an ID it's a call (expects a reply); without, a notification.
  if (ID)
    return Handler.onCall(*Method, std::move(Params), std::move(*ID));
  return Handler.onNotify(*Method, std::move(Params));
}
// Tries to read a line up to and including \n.
// If failing, feof(), ferror(), or shutdownRequested() will be set.
// Grows Out in BufSize chunks until a newline is seen.
bool readLine(std::FILE *In, llvm::SmallVectorImpl<char> &Out) {
  // Big enough to hold any reasonable header line. May not fit content lines
  // in delimited mode, but performance doesn't matter for that mode.
  static constexpr int BufSize = 128;
  size_t Size = 0;
  Out.clear();
  for (;;) {
    Out.resize_for_overwrite(Size + BufSize);
    // Handle EINTR which is sent when a debugger attaches on some platforms.
    if (!retryAfterSignalUnlessShutdown(
            nullptr, [&] { return std::fgets(&Out[Size], BufSize, In); }))
      return false;
    clearerr(In);
    // If the line contained null bytes, anything after it (including \n) will
    // be ignored. Fortunately this is not a legal header or JSON.
    size_t Read = std::strlen(&Out[Size]);
    if (Read > 0 && Out[Size + Read - 1] == '\n') {
      Out.resize(Size + Read);
      return true;
    }
    Size += Read;
  }
}
// Returns false when:
// - ferror(), feof(), or shutdownRequested() are set.
// - Content-Length is missing, zero, or implausibly large (protocol error).
bool JSONTransport::readStandardMessage(std::string &JSON) {
  // A Language Server Protocol message starts with a set of HTTP headers,
  // delimited by \r\n, and terminated by an empty line (\r\n).
  unsigned long long ContentLength = 0;
  llvm::SmallString<128> Line;
  while (true) {
    if (feof(In) || ferror(In) || !readLine(In, Line))
      return false;
    InMirror << Line;
    llvm::StringRef LineRef = Line;
    // We allow comments in headers. Technically this isn't part
    // of the LSP specification, but makes writing tests easier.
    if (LineRef.starts_with("#"))
      continue;
    // Content-Length is a mandatory header, and the only one we handle.
    if (LineRef.consume_front("Content-Length: ")) {
      if (ContentLength != 0) {
        elog("Warning: Duplicate Content-Length header received. "
             "The previous value for this message ({0}) was ignored.",
             ContentLength);
      }
      llvm::getAsUnsignedInteger(LineRef.trim(), 0, ContentLength);
      continue;
    }
    // An empty line indicates the end of headers.
    // Go ahead and read the JSON.
    if (LineRef.trim().empty())
      break;
    // It's another header, ignore it.
  }
  // The fuzzer likes crashing us by sending "Content-Length: 9999999999999999"
  if (ContentLength > 1 << 30) { // 1024M
    elog("Refusing to read message with long Content-Length: {0}. "
         "Expect protocol errors",
         ContentLength);
    return false;
  }
  if (ContentLength == 0) {
    log("Warning: Missing Content-Length header, or zero-length message.");
    return false;
  }
  JSON.resize(ContentLength);
  // BUG FIX: Pos was previously advanced both in the loop header AND again at
  // the end of the body, so each iteration skipped Read bytes of the payload
  // and left unread bytes on the stream. The header increment is the only one.
  for (size_t Pos = 0, Read; Pos < ContentLength; Pos += Read) {
    // Handle EINTR which is sent when a debugger attaches on some platforms.
    Read = retryAfterSignalUnlessShutdown(0, [&]{
      return std::fread(&JSON[Pos], 1, ContentLength - Pos, In);
    });
    if (Read == 0) {
      elog("Input was aborted. Read only {0} bytes of expected {1}.", Pos,
           ContentLength);
      return false;
    }
    InMirror << llvm::StringRef(&JSON[Pos], Read);
    clearerr(In); // If we're done, the error was transient. If we're not done,
                  // either it was transient or we'll see it again on retry.
  }
  return true;
}
// For lit tests we support a simplified syntax:
// - messages are delimited by '---' on a line by itself
// - lines starting with # are ignored.
// This is a testing path, so favor simplicity over performance here.
// When returning false: feof(), ferror(), or shutdownRequested() will be set.
bool JSONTransport::readDelimitedMessage(std::string &JSON) {
  JSON.clear();
  llvm::SmallString<128> Line;
  while (readLine(In, Line)) {
    InMirror << Line;
    auto LineRef = Line.str().trim();
    if (LineRef.starts_with("#")) // comment
      continue;
    // found a delimiter
    if (LineRef.rtrim() == "---")
      break;
    JSON += Line;
  }
  // Distinguish why readLine stopped: shutdown and stream errors fail the
  // read; plain EOF still returns the (possibly final) message.
  if (shutdownRequested())
    return false;
  if (ferror(In)) {
    elog("Input error while reading message!");
    return false;
  }
  return true; // Including at EOF
}
} // namespace
// Factory for the JSON-RPC transport defined above; the concrete class is
// kept in an anonymous namespace.
std::unique_ptr<Transport> newJSONTransport(std::FILE *In,
                                            llvm::raw_ostream &Out,
                                            llvm::raw_ostream *InMirror,
                                            bool Pretty,
                                            JSONStreamStyle Style) {
  return std::make_unique<JSONTransport>(In, Out, InMirror, Pretty, Style);
}
} // namespace clangd
} // namespace clang | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/JSONTransport.cpp |
import requests
from django.core.management.base import BaseCommand, CommandError
from vote.models import Category, Option, OptionPicture
def epsilonator(target):
    """Build a key function measuring how far a media item's width is from target."""
    def epsilon(media):
        delta = media['width'] - target
        return abs(delta)
    return epsilon


# Preferred widths for the two media variants we import.
thumbnail_epsilon = epsilonator(120)
preview_epsilon = epsilonator(600)
class Command(BaseCommand):
    args = 'http://gallery.example.com/v2/path/to/album'
    help = 'Import an Edegal gallery as an album into the voting application'

    def handle(self, *args, **options):
        # BUG FIX: `self` was missing from the signature, so the instance was
        # silently consumed as args[0] and positional arguments had to be read
        # from args[1:].
        for album_url in args:
            # Album URLs look like http://host/v2/path; everything before
            # /v2/ is the base URL used to resolve relative media paths.
            base_url, unused = album_url.split('/v2/', 1)
            album = requests.get(album_url).json()
            category = Category.objects.create(
                title=album['title'],
                description=album['description'],
                template='category_picture.jade'
            )
            for picture in album['pictures']:
                # Skip pictures whose title contains "-00" (presumably
                # non-candidate shots — confirm with gallery conventions).
                if '-00' in picture['title']:
                    continue
                option = category.option_set.create(title=picture['title'])
                # Pick the media variants whose widths are closest to the
                # thumbnail (120px) and preview (600px) targets.
                thumbnail = min(picture['media'], key=thumbnail_epsilon)
                preview = min(picture['media'], key=preview_epsilon)
                OptionPicture.objects.create(
                    option=option,
                    thumbnail=base_url + thumbnail['src'],
                    preview=base_url + preview['src'],
                    link=base_url + picture['path']
                )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import badges.models
from django.conf import settings
import django.utils.timezone
from model_utils import fields
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
class Migration(migrations.Migration):
    # Initial migration for the badges app: creates BadgeAssertion, BadgeClass
    # and CourseCompleteImageConfiguration, then adds the uniqueness constraint
    # and foreign keys (which must come after both referenced models exist).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='BadgeAssertion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', jsonfield.fields.JSONField()),
                ('backend', models.CharField(max_length=50)),
                ('image_url', models.URLField()),
                ('assertion_url', models.URLField()),
                ('modified', fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('created', fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False, db_index=True)),
            ],
        ),
        migrations.CreateModel(
            name='BadgeClass',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(max_length=255, validators=[badges.models.validate_lowercase])),
                ('issuing_component', models.SlugField(default=b'', blank=True, validators=[badges.models.validate_lowercase])),
                ('display_name', models.CharField(max_length=255)),
                ('course_id', CourseKeyField(default=None, max_length=255, blank=True)),
                ('description', models.TextField()),
                ('criteria', models.TextField()),
                ('mode', models.CharField(default=b'', max_length=100, blank=True)),
                ('image', models.ImageField(upload_to=b'badge_classes', validators=[badges.models.validate_badge_image])),
            ],
        ),
        migrations.CreateModel(
            name='CourseCompleteImageConfiguration',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('mode', models.CharField(help_text='The course mode for this badge image. For example, "verified" or "honor".', unique=True, max_length=125)),
                ('icon', models.ImageField(help_text='Badge images must be square PNG files. The file size should be under 250KB.', upload_to=b'course_complete_badges', validators=[badges.models.validate_badge_image])),
                ('default', models.BooleanField(default=False, help_text='Set this value to True if you want this image to be the default image for any course modes that do not have a specified badge image. You can have only one default image.')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='badgeclass',
            unique_together=set([('slug', 'issuing_component', 'course_id')]),
        ),
        migrations.AddField(
            model_name='badgeassertion',
            name='badge_class',
            field=models.ForeignKey(to='badges.BadgeClass'),
        ),
        migrations.AddField(
            model_name='badgeassertion',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
    ]
"""Test cases for the fnmatch module."""
from test import test_support
import unittest
from fnmatch import fnmatch, fnmatchcase
class FnmatchTestCase(unittest.TestCase):

    def check_match(self, filename, pattern, should_match=1):
        """Assert that fnmatch(filename, pattern) agrees with should_match."""
        if should_match:
            # BUG FIX: assert_ is a deprecated alias (removed in Python 3);
            # use assertTrue instead.
            self.assertTrue(fnmatch(filename, pattern),
                            "expected %r to match pattern %r"
                            % (filename, pattern))
        else:
            self.assertTrue(not fnmatch(filename, pattern),
                            "expected %r not to match pattern %r"
                            % (filename, pattern))

    def test_fnmatch(self):
        check = self.check_match
        check('abc', 'abc')
        check('abc', '?*?')
        check('abc', '???*')
        check('abc', '*???')
        check('abc', '???')
        check('abc', '*')
        check('abc', 'ab[cd]')
        check('abc', 'ab[!de]')
        check('abc', 'ab[de]', 0)
        check('a', '??', 0)
        check('a', 'b', 0)
        # these test that '\' is handled correctly in character sets;
        # see SF bug #409651
        check('\\', r'[\]')
        check('a', r'[!\]')
        check('\\', r'[!\]', 0)
def test_main():
    # Entry point for the Python 2 regression-test framework: runs the
    # FnmatchTestCase suite through the legacy test_support helper.
    test_support.run_unittest(FnmatchTestCase)

if __name__ == "__main__":
    test_main()
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "C"
// sink holds the slice allocated by GoFunction7 so the allocation stays
// reachable from Go.
var sink []byte

//export GoFunction7
func GoFunction7() {
	sink = make([]byte, 4096)
}
// main is required to build this package, but is not used as an entry point
// here; the exported function above is called from C.
func main() {
}
<?php
namespace Illuminate\Support;
use Carbon\CarbonInterval;
use Closure;
use DateInterval;
use Illuminate\Support\Traits\Macroable;
use PHPUnit\Framework\Assert as PHPUnit;
use RuntimeException;
class Sleep
{
use Macroable;
/**
* The fake sleep callbacks.
*
* @var array
*/
public static $fakeSleepCallbacks = [];
/**
* Keep Carbon's "now" in sync when sleeping.
*
* @var bool
*/
protected static $syncWithCarbon = false;
/**
* The total duration to sleep.
*
* @var \Carbon\CarbonInterval
*/
public $duration;
/**
* The callback that determines if sleeping should continue.
*
* @var \Closure
*/
public $while;
/**
* The pending duration to sleep.
*
* @var int|float|null
*/
protected $pending = null;
/**
* Indicates that all sleeping should be faked.
*
* @var bool
*/
protected static $fake = false;
/**
* The sequence of sleep durations encountered while faking.
*
* @var array<int, \Carbon\CarbonInterval>
*/
protected static $sequence = [];
/**
* Indicates if the instance should sleep.
*
* @var bool
*/
protected $shouldSleep = true;
/**
* Indicates if the instance already slept via `then()`.
*
* @var bool
*/
protected $alreadySlept = false;
    /**
     * Create a new class instance.
     *
     * Plain numbers are held as a "pending" amount until a unit method
     * (seconds(), milliseconds(), ...) resolves them.
     *
     * @param  int|float|\DateInterval  $duration
     */
    public function __construct($duration)
    {
        $this->duration($duration);
    }
    /**
     * Begin a fluent sleep for the given duration.
     *
     * @param  \DateInterval|int|float  $duration
     * @return static
     */
    public static function for($duration)
    {
        return new static($duration);
    }
/**
* Sleep until the given timestamp.
*
* @param \DateTimeInterface|int|float|numeric-string $timestamp
* @return static
*/
public static function until($timestamp)
{
if (is_numeric($timestamp)) {
$timestamp = Carbon::createFromTimestamp($timestamp, date_default_timezone_get());
}
return new static(Carbon::now()->diff($timestamp));
}
    /**
     * Sleep for the given number of microseconds (usleep()-style shorthand).
     *
     * @param  int  $duration
     * @return static
     */
    public static function usleep($duration)
    {
        return (new static($duration))->microseconds();
    }
    /**
     * Sleep for the given number of seconds (sleep()-style shorthand).
     *
     * @param  int|float  $duration
     * @return static
     */
    public static function sleep($duration)
    {
        return (new static($duration))->seconds();
    }
    /**
     * Sleep for the given duration. Replaces any previously defined duration.
     *
     * @param  \DateInterval|int|float  $duration
     * @return $this
     */
    protected function duration($duration)
    {
        if (! $duration instanceof DateInterval) {
            // A bare number: keep it pending until a unit method
            // (seconds(), milliseconds(), ...) resolves it.
            $this->duration = CarbonInterval::microsecond(0);
            $this->pending = $duration;
        } else {
            $duration = CarbonInterval::instance($duration);
            // Negative intervals are clamped to zero; we never sleep backwards.
            if ($duration->totalMicroseconds < 0) {
                $duration = CarbonInterval::seconds(0);
            }
            $this->duration = $duration;
            $this->pending = null;
        }
        return $this;
    }
    /**
     * Resolve the pending amount as a number of minutes.
     *
     * @return $this
     */
    public function minutes()
    {
        $this->duration->add('minutes', $this->pullPending());
        return $this;
    }
    /**
     * Sleep for one minute (singular alias of minutes()).
     *
     * @return $this
     */
    public function minute()
    {
        return $this->minutes();
    }
    /**
     * Resolve the pending amount as a number of seconds.
     *
     * @return $this
     */
    public function seconds()
    {
        $this->duration->add('seconds', $this->pullPending());
        return $this;
    }
    /**
     * Sleep for one second (singular alias of seconds()).
     *
     * @return $this
     */
    public function second()
    {
        return $this->seconds();
    }
    /**
     * Resolve the pending amount as a number of milliseconds.
     *
     * @return $this
     */
    public function milliseconds()
    {
        $this->duration->add('milliseconds', $this->pullPending());
        return $this;
    }
    /**
     * Sleep for one millisecond (singular alias of milliseconds()).
     *
     * @return $this
     */
    public function millisecond()
    {
        return $this->milliseconds();
    }
    /**
     * Resolve the pending amount as a number of microseconds.
     *
     * @return $this
     */
    public function microseconds()
    {
        $this->duration->add('microseconds', $this->pullPending());
        return $this;
    }
    /**
     * Sleep for one microsecond (singular alias of microseconds()).
     *
     * @return $this
     */
    public function microsecond()
    {
        return $this->microseconds();
    }
    /**
     * Add additional time to sleep for.
     *
     * The amount stays pending until resolved by a unit method, e.g.
     * Sleep::for(1)->second()->and(500)->milliseconds().
     *
     * @param  int|float  $duration
     * @return $this
     */
    public function and($duration)
    {
        $this->pending = $duration;
        return $this;
    }
    /**
     * Sleep while a given callback returns "true".
     *
     * The callback is evaluated before each iteration of the sleep loop.
     *
     * @param  \Closure  $callback
     * @return $this
     */
    public function while(Closure $callback)
    {
        $this->while = $callback;
        return $this;
    }
    /**
     * Sleep immediately, then execute the given callback and return its result.
     *
     * Marks the instance as already slept so the destructor won't sleep again.
     *
     * @param  callable  $then
     * @return mixed
     */
    public function then(callable $then)
    {
        $this->goodnight();
        $this->alreadySlept = true;
        return $then();
    }
    /**
     * Handle the object's destruction by performing any not-yet-run sleep.
     *
     * @return void
     */
    public function __destruct()
    {
        $this->goodnight();
    }
    /**
     * Perform the accumulated sleep, or record it when sleeping is faked.
     *
     * @return void
     */
    protected function goodnight()
    {
        if ($this->alreadySlept || ! $this->shouldSleep) {
            return;
        }
        // A leftover pending amount means a number was given but never
        // resolved by a unit method such as seconds().
        if ($this->pending !== null) {
            throw new RuntimeException('Unknown duration unit.');
        }
        if (static::$fake) {
            // Record instead of sleeping; optionally advance Carbon's "now".
            static::$sequence[] = $this->duration;
            if (static::$syncWithCarbon) {
                Carbon::setTestNow(Carbon::now()->add($this->duration));
            }
            foreach (static::$fakeSleepCallbacks as $callback) {
                $callback($this->duration);
            }
            return;
        }
        $remaining = $this->duration->copy();
        $seconds = (int) $remaining->totalSeconds;
        // Without a while() callback, run the sleep loop body exactly once.
        $while = $this->while ?: function () {
            static $return = [true, false];
            return array_shift($return);
        };
        while ($while()) {
            // Whole seconds via sleep(), the sub-second rest via usleep().
            if ($seconds > 0) {
                sleep($seconds);
                $remaining = $remaining->subSeconds($seconds);
            }
            $microseconds = (int) $remaining->totalMicroseconds;
            if ($microseconds > 0) {
                usleep($microseconds);
            }
        }
    }
/**
 * Resolve the pending duration.
 *
 * Negative amounts are clamped to zero, and the pending value is
 * cleared after being returned.
 *
 * @return int|float
 *
 * @throws \RuntimeException When no duration was specified.
 */
protected function pullPending()
{
    if ($this->pending === null) {
        $this->shouldNotSleep();

        throw new RuntimeException('No duration specified.');
    }

    if ($this->pending < 0) {
        $this->pending = 0;
    }

    return tap($this->pending, function () {
        $this->pending = null;
    });
}
/**
 * Stay awake and capture any attempts to sleep.
 *
 * Also resets the captured sequence, any fake-sleep callbacks,
 * and the Carbon sync flag.
 *
 * @param  bool  $value
 * @param  bool  $syncWithCarbon
 * @return void
 */
public static function fake($value = true, $syncWithCarbon = false)
{
    static::$fake = $value;

    static::$sequence = [];

    static::$fakeSleepCallbacks = [];

    static::$syncWithCarbon = $syncWithCarbon;
}
/**
 * Assert a given amount of sleeping occurred a specific number of times.
 *
 * @param  \Closure  $expected  Filter receiving each captured duration.
 * @param  int  $times
 * @return void
 */
public static function assertSlept($expected, $times = 1)
{
    $count = (new Collection(static::$sequence))->filter($expected)->count();

    PHPUnit::assertSame(
        $times,
        $count,
        "The expected sleep was found [{$count}] times instead of [{$times}]."
    );
}
/**
 * Assert sleeping occurred a given number of times.
 *
 * @param  int  $expected
 * @return void
 */
public static function assertSleptTimes($expected)
{
    PHPUnit::assertSame($expected, $count = count(static::$sequence), "Expected [{$expected}] sleeps but found [{$count}].");
}
/**
 * Assert the given sleep sequence was encountered.
 *
 * ``null`` entries in the sequence act as wildcards matching any
 * captured duration.
 *
 * @param  array  $sequence
 * @return void
 */
public static function assertSequence($sequence)
{
    try {
        static::assertSleptTimes(count($sequence));

        (new Collection($sequence))
            ->zip(static::$sequence)
            ->eachSpread(function (?Sleep $expected, CarbonInterval $actual) {
                if ($expected === null) {
                    return;
                }

                PHPUnit::assertTrue(
                    $expected->shouldNotSleep()->duration->equalTo($actual),
                    vsprintf('Expected sleep duration of [%s] but actually slept for [%s].', [
                        $expected->duration->cascade()->forHumans([
                            'options' => 0,
                            'minimumUnit' => 'microsecond',
                        ]),
                        $actual->cascade()->forHumans([
                            'options' => 0,
                            'minimumUnit' => 'microsecond',
                        ]),
                    ])
                );
            });
    } finally {
        // Ensure the expectation instances never sleep on destruction,
        // even when an assertion above fails.
        foreach ($sequence as $expected) {
            if ($expected instanceof self) {
                $expected->shouldNotSleep();
            }
        }
    }
}
/**
 * Assert that no sleeping occurred.
 *
 * @return void
 */
public static function assertNeverSlept()
{
    return static::assertSleptTimes(0);
}
/**
 * Assert that every captured sleep had a duration of zero time.
 *
 * @return void
 */
public static function assertInsomniac()
{
    // Record a passing assertion even when nothing was captured.
    if (static::$sequence === []) {
        PHPUnit::assertTrue(true);
    }

    foreach (static::$sequence as $duration) {
        PHPUnit::assertSame(0, (int) $duration->totalMicroseconds, vsprintf('Unexpected sleep duration of [%s] found.', [
            $duration->cascade()->forHumans([
                'options' => 0,
                'minimumUnit' => 'microsecond',
            ]),
        ]));
    }
}
/**
 * Indicate that the instance should not sleep.
 *
 * @return $this
 */
protected function shouldNotSleep()
{
    $this->shouldSleep = false;

    return $this;
}
/**
 * Only sleep when the given condition is true.
 *
 * @param  (\Closure($this): bool)|bool  $condition
 * @return $this
 */
public function when($condition)
{
    $this->shouldSleep = (bool) value($condition, $this);

    return $this;
}
/**
 * Don't sleep when the given condition is true.
 *
 * @param  (\Closure($this): bool)|bool  $condition
 * @return $this
 */
public function unless($condition)
{
    return $this->when(! value($condition, $this));
}
/**
 * Specify a callback that should be invoked when faking sleep within a test.
 *
 * The callback receives the captured duration.
 *
 * @param  callable  $callback
 * @return void
 */
public static function whenFakingSleep($callback)
{
    static::$fakeSleepCallbacks[] = $callback;
}
/**
 * Indicate that Carbon's "now" should be kept in sync when sleeping.
 *
 * @param  bool  $value
 * @return void
 */
public static function syncWithCarbon($value = true)
{
    static::$syncWithCarbon = $value;
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Support/Sleep.php |
// NOTE(review): intentionally empty body — presumably a fixture/placeholder;
// confirm before adding behavior or removing.
fun foo() {
}
# basic_source.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,E0611,R0201,R0204,W0212,W0232,W0612
# PyPI imports
from numpy import array
import pytest
# Putil imports
from putil.plot import BasicSource as FUT
from putil.test import AE, AI, APROP, AROPROP
###
# Global variables
###
RIVAR = array([1, 2, 3])
RDVAR = array([10, 20, 30])
###
# Test classes
###
class TestBasicSource(object):
    """Tests for BasicSource."""

    def test_str(self):
        """Test that str behaves correctly."""
        # Full set
        obj = str(FUT(RIVAR, RDVAR, indep_min=-10, indep_max=20.0))
        ref = (
            'Independent variable minimum: -10\n'
            'Independent variable maximum: 20.0\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_min not set
        obj = str(FUT(RIVAR, RDVAR, indep_max=20.0))
        ref = (
            'Independent variable minimum: -inf\n'
            'Independent variable maximum: 20.0\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_max not set
        obj = str(FUT(RIVAR, RDVAR, indep_min=-10))
        ref = (
            'Independent variable minimum: -10\n'
            'Independent variable maximum: +inf\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_min and indep_max not set
        obj = str(FUT(RIVAR, RDVAR))
        ref = (
            'Independent variable minimum: -inf\n'
            'Independent variable maximum: +inf\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref

    def test_complete(self):
        """Test _complete property behavior."""
        obj = FUT(RIVAR, RDVAR, indep_min=0, indep_max=50)
        obj._indep_var = None
        assert not obj._complete
        obj = FUT(RIVAR, RDVAR, indep_min=0, indep_max=50)
        assert obj._complete

    @pytest.mark.parametrize('indep_min', [1, 2.0])
    def test_indep_min(self, indep_min):
        """Test indep_min property behavior."""
        # __init__ path
        FUT(RIVAR, RDVAR, indep_min=indep_min)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        obj.indep_min = indep_min
        assert obj.indep_min == indep_min

    @pytest.mark.basic_source
    @pytest.mark.parametrize('indep_min', ['a', False])
    def test_indep_min_exceptions(self, indep_min):
        """Test indep_min property exceptions."""
        # __init__ path
        AI(FUT, 'indep_min', RIVAR, RDVAR, indep_min=indep_min)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        msg = 'Argument `indep_min` is not valid'
        APROP(obj, 'indep_min', indep_min, RuntimeError, msg)

    @pytest.mark.parametrize('indep_max', [1, 2.0])
    def test_indep_max(self, indep_max):
        """Test indep_max property behavior."""
        # __init__ path
        FUT(RIVAR, RDVAR, indep_max=indep_max)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        obj.indep_max = indep_max
        assert obj.indep_max == indep_max

    @pytest.mark.basic_source
    @pytest.mark.parametrize('indep_max', ['a', False])
    def test_indep_max_exceptions(self, indep_max):
        """Test indep_max property exceptions."""
        # __init__ path
        AI(FUT, 'indep_max', RIVAR, RDVAR, indep_max=indep_max)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        msg = 'Argument `indep_max` is not valid'
        APROP(obj, 'indep_max', indep_max, RuntimeError, msg)

    @pytest.mark.basic_source
    def test_indep_min_greater_than_indep_max_exceptions(self):
        """Test behavior when indep_min and indep_max are incongruous."""
        # Assign indep_min first
        obj = FUT(RIVAR, RDVAR, indep_min=0.5)
        exmsg = 'Argument `indep_min` is greater than argument `indep_max`'
        APROP(obj, 'indep_max', 0, ValueError, exmsg)
        # Assign indep_max first
        obj = FUT(RIVAR, RDVAR)
        obj.indep_max = 40
        APROP(obj, 'indep_min', 50, ValueError, exmsg)

    def test_indep_var(self):
        """Test indep_var property behavior."""
        # __init__ path
        indep_var1 = RIVAR
        indep_var2 = array([4.0, 5.0, 6.0])
        assert (FUT(indep_var1, RDVAR).indep_var == indep_var1).all()
        assert (FUT(indep_var2, RDVAR).indep_var == indep_var2).all()
        # Managed attribute path
        obj = FUT(indep_var=indep_var1, dep_var=RDVAR)
        obj.indep_var = indep_var2
        assert (obj.indep_var == indep_var2).all()

    @pytest.mark.basic_source
    @pytest.mark.parametrize(
        'indep_var', [None, 'a', array([1.0, 2.0, 0.0, 3.0]), []]
    )
    def test_indep_var_exceptions(self, indep_var):
        """Test indep_var property exceptions."""
        # __init__ path
        AI(FUT, 'indep_var', indep_var, RDVAR)
        # Assign indep_min via attribute
        msg = (
            'Argument `indep_var` is empty after '
            '`indep_min`/`indep_max` range bounding'
        )
        obj = FUT(RIVAR, RDVAR)
        APROP(obj, 'indep_min', 45, ValueError, msg)
        # Assign indep_max via attribute
        obj = FUT(RIVAR, RDVAR)
        APROP(obj, 'indep_max', 0, ValueError, msg)
        # Assign both indep_min and indep_max via __init__ path
        AE(FUT, ValueError, msg, RIVAR, RDVAR, indep_min=4, indep_max=10)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        # Wrong type
        assert (obj.indep_var == RIVAR).all()
        msg = 'Argument `indep_var` is not valid'
        APROP(obj, 'indep_var', indep_var, RuntimeError, msg)

    def test_dep_var(self):
        """Test dep_var property behavior."""
        # __init__ path
        # Valid values, these should not raise any exception
        indep_var = array([10, 20, 30])
        dep_var1 = array([1, 2, 3])
        dep_var2 = array([4.0, 5.0, 6.0])
        assert (FUT(indep_var, dep_var1).dep_var == dep_var1).all()
        assert (FUT(indep_var, dep_var2).dep_var == dep_var2).all()
        # Managed attribute path
        obj = FUT(indep_var=indep_var, dep_var=dep_var1)
        obj.dep_var = dep_var1
        assert (obj.dep_var == dep_var1).all()
        obj.dep_var = dep_var2
        assert (obj.dep_var == dep_var2).all()

    @pytest.mark.basic_source
    @pytest.mark.parametrize('dep_var', [None, 'a', []])
    def test_dep_var_exceptions(self, dep_var):
        """Test dep_var property exceptions."""
        # __init__ path
        AI(FUT, 'dep_var', RIVAR, dep_var)
        # Managed attribute path
        obj = FUT(RIVAR, array([1, 2, 3]))
        msg = 'Argument `dep_var` is not valid'
        APROP(obj, 'dep_var', dep_var, RuntimeError, msg)

    @pytest.mark.basic_source
    def test_indep_dep_var_not_same_number_of_elements_exceptions(self):
        """Test indep_var and dep_var vector congruency."""
        msg = (
            'Arguments `indep_var` and `dep_var` '
            'must have the same number of elements'
        )
        # Both set at object creation
        AE(FUT, ValueError, msg, RDVAR, array([1, 2, 3, 4, 5, 6]), 30, 50)
        AE(FUT, ValueError, msg, RDVAR, array([1, 2]), 30, 50)
        # indep_var set first
        obj = FUT(
            indep_var=array([10, 20, 30, 40, 50, 60]),
            dep_var=array([1, 2, 3, 4, 5, 6]),
            indep_min=30,
            indep_max=50)
        APROP(obj, 'dep_var', array([100, 200, 300]), ValueError, msg)
        # dep_var set first
        obj = FUT(RDVAR, array([100, 200, 300]), indep_min=30, indep_max=50)
        APROP(obj, 'dep_var', array([10, 20, 30, 40, 50, 60]), ValueError, msg)

    @pytest.mark.basic_source
    @pytest.mark.parametrize(
        'prop', ['indep_min', 'indep_max', 'indep_var', 'dep_var']
    )
    def test_cannot_delete_attributes_exceptions(self, prop):
        """Test that del method raises an exception on all class attributes."""
        AROPROP(FUT(RDVAR, array([100, 200, 300])), prop)
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log entries within the Google Stackdriver Logging API."""
import collections
import json
import re
from google.protobuf.any_pb2 import Any
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import Parse
from google.cloud.logging.resource import Resource
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud._helpers import _datetime_to_rfc3339
_GLOBAL_RESOURCE = Resource(type="global", labels={})
_LOGGER_TEMPLATE = re.compile(
r"""
projects/ # static prefix
(?P<project>[^/]+) # initial letter, wordchars + hyphen
/logs/ # static midfix
(?P<name>[^/]+) # initial letter, wordchars + allowed punc
""",
re.VERBOSE,
)
def logger_name_from_path(path):
    """Validate a logger URI path and get the logger name.

    :type path: str
    :param path: URI path for a logger API request.

    :rtype: str
    :returns: Logger name parsed from ``path``.

    :raises: :class:`ValueError` if the ``path`` is ill-formed.
    """
    # No project check is performed: the ``project`` argument of the
    # helper is deliberately passed as ``None``.
    return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
def _int_or_none(value):
"""Helper: return an integer or ``None``."""
if value is not None:
value = int(value)
return value
# Field names and their defaults for the LogEntry named tuple.
_LOG_ENTRY_FIELDS = (  # (name, default)
    ("log_name", None),
    ("labels", None),
    ("insert_id", None),
    ("severity", None),
    ("http_request", None),
    ("timestamp", None),
    ("resource", _GLOBAL_RESOURCE),
    ("trace", None),
    ("span_id", None),
    ("trace_sampled", None),
    ("source_location", None),
    ("operation", None),
    ("logger", None),
    ("payload", None),
)

# Named-tuple base for LogEntry; defaults are installed by assigning to
# ``__new__.__defaults__`` (they apply right-to-left over the fields).
_LogEntryTuple = collections.namedtuple(
    "LogEntry", (field for field, _ in _LOG_ENTRY_FIELDS)
)
_LogEntryTuple.__new__.__defaults__ = tuple(default for _, default in _LOG_ENTRY_FIELDS)
# Shared reST parameter documentation, spliced into the ``__doc__`` of
# LogEntry and each of its subclasses below.
_LOG_ENTRY_PARAM_DOCSTRING = """\
:type log_name: str
:param log_name: the name of the logger used to post the entry.
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry
:type trace: str
:param trace: (optional) traceid to apply to the entry.
:type span_id: str
:param span_id: (optional) span_id within the trace for the log entry.
Specify the trace parameter if span_id is set.
:type trace_sampled: bool
:param trace_sampled: (optional) the sampling decision of the trace
associated with the log entry.
:type source_location: dict
:param source_location: (optional) location in source code from which
the entry was emitted.
:type operation: dict
:param operation: (optional) additional information about a potentially
long-running operation associated with the log entry.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
"""

# Shared "See also" footer for the entry class docstrings.
_LOG_ENTRY_SEE_ALSO_DOCSTRING = """\
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
class LogEntry(_LogEntryTuple):
    __doc__ = (
        """
    Log entry.
    """
        + _LOG_ENTRY_PARAM_DOCSTRING
        + _LOG_ENTRY_SEE_ALSO_DOCSTRING
    )

    # Server-side receive time; set by from_api_repr when the API response
    # carries "receiveTimestamp".
    received_timestamp = None

    @classmethod
    def _extract_payload(cls, resource):
        """Helper for :meth:`from_api_repr`"""
        # Base class carries no payload; subclasses override this.
        return None

    @classmethod
    def from_api_repr(cls, resource, client, loggers=None):
        """Factory: construct an entry given its API representation

        :type resource: dict
        :param resource: text entry resource representation returned from
            the API

        :type client: :class:`google.cloud.logging.client.Client`
        :param client: Client which holds credentials and project
            configuration.

        :type loggers: dict
        :param loggers:
            (Optional) A mapping of logger fullnames -> loggers. If not
            passed, the entry will have a newly-created logger.

        :rtype: :class:`google.cloud.logging.entries.LogEntry`
        :returns: Log entry parsed from ``resource``.
        """
        if loggers is None:
            loggers = {}
        logger_fullname = resource["logName"]
        logger = loggers.get(logger_fullname)
        if logger is None:
            # Cache the newly-created logger for reuse across entries.
            logger_name = logger_name_from_path(logger_fullname)
            logger = loggers[logger_fullname] = client.logger(logger_name)
        payload = cls._extract_payload(resource)
        insert_id = resource.get("insertId")
        timestamp = resource.get("timestamp")
        if timestamp is not None:
            timestamp = _rfc3339_nanos_to_datetime(timestamp)
        labels = resource.get("labels")
        severity = resource.get("severity")
        http_request = resource.get("httpRequest")
        trace = resource.get("trace")
        span_id = resource.get("spanId")
        trace_sampled = resource.get("traceSampled")
        source_location = resource.get("sourceLocation")
        if source_location is not None:
            # NOTE: mutates the caller's "sourceLocation" dict in place,
            # replacing "line" with its integer form (or None).
            line = source_location.pop("line", None)
            source_location["line"] = _int_or_none(line)
        operation = resource.get("operation")
        monitored_resource_dict = resource.get("resource")
        monitored_resource = None
        if monitored_resource_dict is not None:
            monitored_resource = Resource._from_dict(monitored_resource_dict)
        inst = cls(
            log_name=logger_fullname,
            insert_id=insert_id,
            timestamp=timestamp,
            labels=labels,
            severity=severity,
            http_request=http_request,
            resource=monitored_resource,
            trace=trace,
            span_id=span_id,
            trace_sampled=trace_sampled,
            source_location=source_location,
            operation=operation,
            logger=logger,
            payload=payload,
        )
        received = resource.get("receiveTimestamp")
        if received is not None:
            inst.received_timestamp = _rfc3339_nanos_to_datetime(received)
        return inst

    def to_api_repr(self):
        """API repr (JSON format) for entry.

        Only fields that are set (not ``None``) are included.
        """
        info = {}
        if self.log_name is not None:
            info["logName"] = self.log_name
        if self.resource is not None:
            info["resource"] = self.resource._to_dict()
        if self.labels is not None:
            info["labels"] = self.labels
        if self.insert_id is not None:
            info["insertId"] = self.insert_id
        if self.severity is not None:
            info["severity"] = self.severity
        if self.http_request is not None:
            info["httpRequest"] = self.http_request
        if self.timestamp is not None:
            info["timestamp"] = _datetime_to_rfc3339(self.timestamp)
        if self.trace is not None:
            info["trace"] = self.trace
        if self.span_id is not None:
            info["spanId"] = self.span_id
        if self.trace_sampled is not None:
            info["traceSampled"] = self.trace_sampled
        if self.source_location is not None:
            # Copy to avoid mutating our own state; the API expects the
            # "line" value as a string.
            source_location = self.source_location.copy()
            source_location["line"] = str(source_location.pop("line", 0))
            info["sourceLocation"] = source_location
        if self.operation is not None:
            info["operation"] = self.operation
        return info
class TextEntry(LogEntry):
    __doc__ = (
        """
    Log entry with text payload.
    """
        + _LOG_ENTRY_PARAM_DOCSTRING
        + """

    :type payload: str | unicode
    :param payload: payload for the log entry.
    """
        + _LOG_ENTRY_SEE_ALSO_DOCSTRING
    )

    @classmethod
    def _extract_payload(cls, resource):
        """Helper for :meth:`from_api_repr`"""
        return resource["textPayload"]

    def to_api_repr(self):
        """API repr (JSON format) for entry.

        Adds the ``textPayload`` field to the base representation.
        """
        info = super(TextEntry, self).to_api_repr()
        info["textPayload"] = self.payload
        return info
class StructEntry(LogEntry):
    __doc__ = (
        """
    Log entry with JSON payload.
    """
        + _LOG_ENTRY_PARAM_DOCSTRING
        + """

    :type payload: dict
    :param payload: payload for the log entry.
    """
        + _LOG_ENTRY_SEE_ALSO_DOCSTRING
    )

    @classmethod
    def _extract_payload(cls, resource):
        """Helper for :meth:`from_api_repr`"""
        return resource["jsonPayload"]

    def to_api_repr(self):
        """API repr (JSON format) for entry.

        Adds the ``jsonPayload`` field to the base representation.
        """
        info = super(StructEntry, self).to_api_repr()
        info["jsonPayload"] = self.payload
        return info
class ProtobufEntry(LogEntry):
    __doc__ = (
        """
    Log entry with protobuf message payload.
    """
        + _LOG_ENTRY_PARAM_DOCSTRING
        + """

    :type payload: protobuf message
    :param payload: payload for the log entry.
    """
        + _LOG_ENTRY_SEE_ALSO_DOCSTRING
    )

    @classmethod
    def _extract_payload(cls, resource):
        """Helper for :meth:`from_api_repr`"""
        return resource["protoPayload"]

    @property
    def payload_pb(self):
        """The payload as an ``Any`` protobuf message, else ``None``."""
        if isinstance(self.payload, Any):
            return self.payload

    @property
    def payload_json(self):
        """The payload when it is not an ``Any`` message, else ``None``."""
        if not isinstance(self.payload, Any):
            return self.payload

    def to_api_repr(self):
        """API repr (JSON format) for entry.

        Adds the ``protoPayload`` field to the base representation.
        """
        info = super(ProtobufEntry, self).to_api_repr()
        # NOTE(review): assumes ``payload`` is a protobuf message; a dict
        # payload (see ``payload_json``) would fail here — confirm callers
        # guarantee a message at this point.
        info["protoPayload"] = MessageToDict(self.payload)
        return info

    def parse_message(self, message):
        """Parse payload into a protobuf message.

        Mutates the passed-in ``message`` in place.

        :type message: Protobuf message
        :param message: the message to be logged
        """
        # NOTE: This assumes that ``payload`` is already a deserialized
        # ``Any`` field and ``message`` has come from an imported
        # ``pb2`` module with the relevant protobuf message type.
        Parse(json.dumps(self.payload), message)
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.plugin.tracing
import io.ktor.websocket.*
/**
 * [DefaultWebSocketSession] wrapper that reports frame traffic to a [Tracer].
 *
 * All session behavior is delegated to [delegate], while the incoming and
 * outgoing channels are wrapped so each frame is traced under [requestId].
 */
internal class WebSocketSessionTracer(
    requestId: String,
    tracer: Tracer,
    private val delegate: DefaultWebSocketSession
) : DefaultWebSocketSession by delegate {

    override val incoming = IncomingChannelTracer(requestId, tracer, delegate.incoming)
    override val outgoing = OutgoingChannelTracer(requestId, tracer, delegate.outgoing)

    // Route direct sends through the traced outgoing channel.
    override suspend fun send(frame: Frame) {
        outgoing.send(frame)
    }
}
// Returns the first enumerable key of props.value, or null when it has none.
function Component(props) {
  for (const x in props.value) {
    return x;
  }
  return null;
}

export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [{value: {a: 'A!'}}],
};
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.forms import ModelForm
# import Model Kursus
from elearning.models import Kursus
# import Form Kursus
from elearning.forms import KursusForm
# Index page view
class IndexView(generic.ListView):
    """Render the index page with up to ten courses ordered by name.

    NOTE(review): the context name says "latest" but the queryset orders
    by ``nama`` (name), not by date — confirm which is intended.
    """
    template_name = 'elearning/index.html'
    context_object_name = 'latest_kursus_list'

    def get_queryset(self):
        # First ten courses, alphabetical by name.
        return Kursus.objects.order_by('nama')[:10]
# Course detail page view
class KursusView(generic.DetailView):
    """Render the detail page for a single Kursus."""
    model = Kursus
    template_name = 'elearning/kursus.html'
# Course creation page view
class KursusTambahView(CreateView):
    """Create a new Kursus, pre-filling the form with the requesting user."""
    model = Kursus
    template_name = 'elearning/kursus/tambah.html'
    form_class = KursusForm

    def get_initial(self):
        # Work on a copy: updating ``self.initial`` in place mutates the
        # class-level ``initial`` dict that Django's FormMixin shares across
        # all instances (and therefore across requests).
        initial = self.initial.copy()
        initial['user'] = self.request.user
        return initial

    def get_success_url(self):
        return reverse('index')
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.access_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the access token provider logic of
OAuth 1.0 RFC 5849. It validates the correctness of access token requests,
creates and persists tokens as well as create the proper response to be
returned to the client.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import urlencode
from .. import errors
from .base import BaseEndpoint
log = logging.getLogger(__name__)
class AccessTokenEndpoint(BaseEndpoint):
    """An endpoint responsible for providing OAuth 1 access tokens.

    Typical use is to instantiate with a request validator and invoke the
    ``create_access_token_response`` from a view function. The tuple returned
    has all information necessary (body, status, headers) to quickly form
    and return a proper response. See :doc:`/oauth1/validator` for details on which
    validator methods to implement for this endpoint.
    """

    def create_access_token(self, request, credentials):
        """Create and save a new access token.

        Similar to OAuth 2, indication of granted scopes will be included as a
        space separated list in ``oauth_authorized_realms``.

        :param request: An oauthlib.common.Request object.
        :param credentials: Extra key/value pairs merged into the token.
        :returns: The token as an urlencoded string.
        """
        request.realms = self.request_validator.get_realms(
            request.resource_owner_key, request)
        token = {
            'oauth_token': self.token_generator(),
            'oauth_token_secret': self.token_generator(),
            # Backport the authorized scopes indication used in OAuth2
            'oauth_authorized_realms': ' '.join(request.realms)
        }
        token.update(credentials)
        self.request_validator.save_access_token(token, request)
        return urlencode(token.items())

    def create_access_token_response(self, uri, http_method='GET', body=None,
                                     headers=None, credentials=None):
        """Create an access token response, with a new request token if valid.

        :param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
        :param body: The request body as a string.
        :param headers: The request headers as a dict.
        :param credentials: A list of extra credentials to include in the token.
        :returns: A tuple of 3 elements.
                  1. A dict of headers to set on the response.
                  2. The response body as a string.
                  3. The response status code as an integer.

        An example of a valid request::

            >>> from your_validator import your_validator
            >>> from oauthlib.oauth1 import AccessTokenEndpoint
            >>> endpoint = AccessTokenEndpoint(your_validator)
            >>> h, b, s = endpoint.create_access_token_response(
            ...     'https://your.provider/access_token?foo=bar',
            ...     headers={
            ...         'Authorization': 'OAuth oauth_token=234lsdkf....'
            ...     },
            ...     credentials={
            ...         'my_specific': 'argument',
            ...     })
            >>> h
            {'Content-Type': 'application/x-www-form-urlencoded'}
            >>> b
            'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
            >>> s
            200

        An response to invalid request would have a different body and status::

            >>> b
            'error=invalid_request&description=missing+resource+owner+key'
            >>> s
            400

        The same goes for an an unauthorized request:

            >>> b
            ''
            >>> s
            401
        """
        resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        try:
            request = self._create_request(uri, http_method, body, headers)
            valid, processed_request = self.validate_access_token_request(
                request)
            if valid:
                token = self.create_access_token(request, credentials or {})
                # A request token may be exchanged at most once; invalidate
                # it as soon as the access token has been issued.
                self.request_validator.invalidate_request_token(
                    request.client_key,
                    request.resource_owner_key,
                    request)
                return resp_headers, token, 200
            else:
                # Unauthorized: empty body, per RFC 5849 guidance.
                return {}, None, 401
        except errors.OAuth1Error as e:
            return resp_headers, e.urlencoded, e.status_code

    def validate_access_token_request(self, request):
        """Validate an access token request.

        :param request: An oauthlib.common.Request object.
        :raises: OAuth1Error if the request is invalid.
        :returns: A tuple of 2 elements.
                  1. The validation result (True or False).
                  2. The request object.
        """
        self._check_transport_security(request)
        self._check_mandatory_parameters(request)

        if not request.resource_owner_key:
            raise errors.InvalidRequestError(
                description='Missing resource owner.')

        if not self.request_validator.check_request_token(
                request.resource_owner_key):
            raise errors.InvalidRequestError(
                description='Invalid resource owner key format.')

        if not request.verifier:
            raise errors.InvalidRequestError(
                description='Missing verifier.')

        if not self.request_validator.check_verifier(request.verifier):
            raise errors.InvalidRequestError(
                description='Invalid verifier format.')

        if not self.request_validator.validate_timestamp_and_nonce(
                request.client_key, request.timestamp, request.nonce, request,
                request_token=request.resource_owner_key):
            return False, request

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid client credentials.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy client is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable client enumeration
        valid_client = self.request_validator.validate_client_key(
            request.client_key, request)
        if not valid_client:
            request.client_key = self.request_validator.dummy_client

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid or expired token.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy token is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable resource owner enumeration
        valid_resource_owner = self.request_validator.validate_request_token(
            request.client_key, request.resource_owner_key, request)
        if not valid_resource_owner:
            request.resource_owner_key = self.request_validator.dummy_request_token

        # The server MUST verify (Section 3.2) the validity of the request,
        # ensure that the resource owner has authorized the provisioning of
        # token credentials to the client, and ensure that the temporary
        # credentials have not expired or been used before. The server MUST
        # also verify the verification code received from the client.
        # .. _`Section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
        #
        # Note that early exit would enable resource owner authorization
        # verifier enumertion.
        valid_verifier = self.request_validator.validate_verifier(
            request.client_key,
            request.resource_owner_key,
            request.verifier,
            request)

        valid_signature = self._check_signature(request, is_token_request=True)

        # log the results to the validator_log
        # this lets us handle internal reporting and analysis
        request.validator_log['client'] = valid_client
        request.validator_log['resource_owner'] = valid_resource_owner
        request.validator_log['verifier'] = valid_verifier
        request.validator_log['signature'] = valid_signature

        # We delay checking validity until the very end, using dummy values for
        # calculations and fetching secrets/keys to ensure the flow of every
        # request remains almost identical regardless of whether valid values
        # have been supplied. This ensures near constant time execution and
        # prevents malicious users from guessing sensitive information
        v = all((valid_client, valid_resource_owner, valid_verifier,
                 valid_signature))
        if not v:
            log.info("[Failure] request verification failed.")
            log.info("Valid client:, %s", valid_client)
            log.info("Valid token:, %s", valid_resource_owner)
            log.info("Valid verifier:, %s", valid_verifier)
            log.info("Valid signature:, %s", valid_signature)
        return v, request
{"files":{"Cargo.toml":"b1c07acb994f88418a0bbb0d393f76c4ca4276d9f4497ca734eb182286169ea8","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d9ca174c5818d7885265f0242d3193dd2963c2ee1da84aed8b938ec6a6990e58","build.rs":"a5fdacb9913eeede5fc08f39acbd31b2508483fdc20c79d6bc274d74407a1816","src/crate_root.rs":"157ca402e23c32f11a4f1797c81afb5e9f08df96768012cf3e3199153aafb2dd","src/de/ignored_any.rs":"6480f2b2a83dc4764d01b2eec7309729eef2492eede2e5ee98d23a60b05198eb","src/de/impls.rs":"9fb7a0322af51971fe1fa8f5b56c7ca3716622a6e9127b86bddfc0742c11383c","src/de/mod.rs":"5ec8602d593915e0cf480b0ce67b02f6ab066dac34725237d2c7b4a6ef12a845","src/de/value.rs":"fb6fef6d23d95d516c6e1d6b5cefd8b98ba3881214a82a8a7e0a8ffbb0a12083","src/format.rs":"c85071b016df643b161859682d21ce34fa0ebf2a3bdbeeea69859da48f5d934f","src/lib.rs":"5789fbaa20497111e3efa2af8a3c1d18bb41c0c0480b542d26a2573e8ff5cf5c","src/macros.rs":"a61c9d19b210697304328e6bb9380a1de713e21042256df90a2b4553f178b0be","src/private/content.rs":"5fdfb2bb95ecc80375507acb813a4c640496385e56fc99ab448f6b19e01fcc01","src/private/doc.rs":"abe656c015267555ca26ebbcf2f4dcc52c719a0b9ade3a5ed4635b2784699b8c","src/private/mod.rs":"3bb3427ec80077b9df1853aa17681de796de0179d74871a96b88b72469de6cfc","src/private/seed.rs":"3f6e098c5bd314788370dcaf3ab0152fcd7feb6bcf36a9c51808938cd58071eb","src/private/size_hint.rs":"350694a2abaad94ca5d33958710a5bb8973a2ea1a3dcc50a41405c943761b81f","src/private/string.rs":"c1500fd4b64c24a5e45fa5f48c85c802816d6954a2999a72fc5a8861687212d4","src/ser/fmt.rs":"bd129d9f085933b76dafef6eb43ffac893c1f6484a3064dcd82faeeebc3b203c","src/ser/impls.rs":"5ee7efc439345e8665da0bd79bc06c02a0506e5fd0f3a4cf11af0c7197eaa643","src/ser/impossible.rs":"283f628d5107aa030d2e96eeb1dee187f0ac18c24d517edeb51738ab15dfb871","src/ser/mod.rs":"ec097d92c8545356961e977a4c9650361cadd1d3a243d805ae7b0e0e589ae803","src/std_error.rs":"b36f
d6a2b6898770b9f1c51517eb362af115767d0f7cb4a713e1b949530ffa8a"},"package":"41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"} | json | github | https://github.com/nodejs/node | deps/crates/vendor/serde_core/.cargo-checksum.json |
'''
Created on Dec 29, 2012
@author: dstrauss
Copyright 2013 David Strauss
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
method definition for doing CG solves with a pipelined matrix-vector multiplication
'''
import numpy as np
def cg(A, b, maxiter=30, tol=1e-6, pll=False):
    '''Solve A x = b with the conjugate gradient method.

    Parameters:
      A       -- callable implementing the "matrix-vector" product A(x);
                 the underlying operator must be symmetric positive
                 (semi)definite for CG to converge.
      b       -- right-hand side vector (1-D numpy array).
      maxiter -- maximum number of CG iterations.
      tol     -- exit tolerance; iteration stops once ||b - A x|| < tol.
      pll     -- if True, plot the residual-norm history with matplotlib.

    Returns:
      (x, ix) -- the approximate solution and the number of iterations
                 actually performed.
    '''
    x = np.zeros(b.size, dtype=b.dtype)
    r = b - A(x)
    # Copy so the search direction never aliases the residual array.
    p = r.copy()
    rsold = np.dot(r.T, r)
    rsn = []
    ix = 0
    while ix < maxiter:
        ix += 1
        Ap = A(p)
        alpha = rsold / np.dot(p.T, Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        rsnew = np.dot(r.T, r)
        rsn.append(np.sqrt(rsnew))
        if np.sqrt(rsnew) < tol:
            break
        p = r + (rsnew / rsold) * p
        rsold = rsnew
    if pll:
        # Imported lazily so matplotlib is only needed when plotting.
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.plot(rsn)
        plt.title('rsn')
        plt.show()
    return x, ix
def test():
    '''Smoke test: solve a 20x20 1-D Laplacian system and check the residual.

    Returns the (x, ix) pair from cg so callers can inspect the solve.
    '''
    import scipy.sparse
    # Tridiagonal [-1, 2, -1] operator: symmetric positive definite.
    opr = -np.ones((3, 20))
    opr[1, :] = 2
    M = scipy.sparse.spdiags(opr, [-1, 0, 1], 20, 20)
    b = np.zeros(20)
    b[9] = 1
    x, ix = cg(lambda v: M * v, b)
    # The original test never checked the result; assert convergence.
    assert np.linalg.norm(M * x - b) < 1e-4, "CG failed to converge"
    return x, ix
"""
Monitors home energy use for the eliq online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.eliqonline/
"""
import logging
from urllib.error import URLError
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_NAME, STATE_UNKNOWN
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['eliqonline==1.0.11']
DEFAULT_NAME = "ELIQ Energy Usage"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Eliq sensor from its platform configuration.

    Requires an API access token; aborts (returning False) when the token
    is missing so Home Assistant reports a configuration error.
    """
    import eliqonline

    access_token = config.get(CONF_ACCESS_TOKEN)
    name = config.get(CONF_NAME, DEFAULT_NAME)
    channel_id = config.get("channel_id")

    if access_token is None:
        # Typo fix: "aquired" -> "acquired" in the user-facing message.
        _LOGGER.error(
            "Configuration Error: "
            "Please make sure you have configured your access token "
            "that can be acquired from https://my.eliq.se/user/settings/api")
        return False

    api = eliqonline.API(access_token)
    add_devices([EliqSensor(api, channel_id, name)])
class EliqSensor(Entity):
    """Sensor reporting instantaneous power from the ELIQ online API."""

    def __init__(self, api, channel_id, name):
        """Store the API handle and channel, then fetch an initial reading."""
        self.api = api
        self.channel_id = channel_id
        self._name = name
        self._unit_of_measurement = "W"
        self._state = STATE_UNKNOWN
        self.update()

    @property
    def name(self):
        """Name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Icon shown in the frontend."""
        return "mdi:speedometer"

    @property
    def unit_of_measurement(self):
        """Unit the state is expressed in (watts)."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Current power reading, or STATE_UNKNOWN before the first fetch."""
        return self._state

    def update(self):
        """Fetch the latest reading from the ELIQ servers."""
        try:
            response = self.api.get_data_now(channelid=self.channel_id)
            self._state = int(response.power)
        except (TypeError, URLError):
            _LOGGER.error("Could not connect to the eliqonline servers")
import unittest
import os
from pkget import Yaml
class YamlTest(unittest.TestCase):
def test_load_all(self):
def test_load_all(*args, **kwargs):
result = []
for item in Yaml.load_all(*args, **kwargs):
result.append(item)
return result
def assert_yaml_file_1(result, index=0):
self.assertEqual(result[index]["default"]["recipepaths"], [None])
self.assertEqual(
result[index]["default"]["installprefix"], "~/.local")
self.assertEqual(
result[index]["default"]["pkginfoprefix"], "~/.local/pkgetinfo")
self.assertEqual(
result[index]["local"]["recipepaths"], ["/usr/local", None])
self.assertEqual(
result[index]["local"]["installprefix"], "/usr/local/")
self.assertEqual(
result[index]["local"]["pkginfoprefix"], "/usr/local/pkgetinfo")
self.assertEqual(
result[1 + index]["global"]["recipepaths"], None)
self.assertEqual(
result[1 + index]["global"]["installprefix"], "/usr/local/")
self.assertEqual(
result[1 + index]["global"]["pkginfoprefix"],
"/usr/local/pkgetinfo")
def assert_yaml_file_2(result, index=0):
self.assertEqual(
result[index]["etcd"]["description"],
"Highly-available key value store for shared configuration and \
service discovery")
self.assertEqual(
result[index]["etcd"]["website"], "https://github.com/coreos/etcd")
self.assertEqual(result[index]["etcd"]["type"], "tar.gz")
self.assertEqual(result[index]["etcd"]["version"], "3.2.5")
self.assertEqual(result[index]["etcd"]["os"], "linux")
self.assertEqual(result[index]["etcd"]["arch"], "amd64")
self.assertEqual(result[index]["etcd"]["url"], None)
self.assertEqual(
result[index]["etcd"]["urlprefix"],
"https://github.com/coreos/etcd/releases/download")
self.assertEqual(result[index]["etcd"]["depends"], None)
result = test_load_all(
filenames=os.path.join(
os.path.dirname(__file__), "test_yaml_1.yaml"))
assert_yaml_file_1(result)
result = test_load_all(
filenames=[os.path.join(
os.path.dirname(__file__), "test_yaml_1.yaml"),
os.path.join(
os.path.dirname(__file__), "test_yaml_2.yaml")])
assert_yaml_file_1(result)
assert_yaml_file_2(result, 2)
result = test_load_all(contents="--- \n\
a: 12 \n\
b: df \n")
self.assertEqual(result[0]["a"], 12)
self.assertEqual(result[0]["b"], "df")
result = test_load_all(
contents=["---\na: 23\nb: ddd\n",
"---\na: df\nb: 45\n---\ng: 34\n"])
self.assertEqual(result[0]["a"], 23)
self.assertEqual(result[0]["b"], "ddd")
self.assertEqual(result[1]["a"], "df")
self.assertEqual(result[1]["b"], 45)
self.assertEqual(result[2]["g"], 34)
result = test_load_all(
contents=["---\na: 23\nb: ddd\n",
"---\na: df\nb: 45\n---\ng: 34\n"],
filenames=[os.path.join(os.path.dirname(__file__),
"test_yaml_1.yaml"),
os.path.join(os.path.dirname(__file__),
"test_yaml_2.yaml")])
self.assertEqual(result[0]["a"], 23)
self.assertEqual(result[0]["b"], "ddd")
self.assertEqual(result[1]["a"], "df")
self.assertEqual(result[1]["b"], 45)
self.assertEqual(result[2]["g"], 34)
assert_yaml_file_1(result, 3)
assert_yaml_file_2(result, 5) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def keyword(name=None, tags=()):
    """Decorator that attaches a custom keyword name and tags to a callable.

    Sets the ``robot_name`` and ``robot_tags`` attributes on the decorated
    function or method; Robot Framework reads these to determine the
    keyword's name and tags.

    Usage with arguments::

        @keyword(name='Login Via User Panel')
        def login(username, password):
            # ...

        @keyword(name='Logout Via User Panel', tags=['example', 'tags'])
        def logout():
            # ...

    When ``name`` is omitted the keyword keeps its original name, but
    ``robot_name`` is still created (useful for marking methods as keywords
    in a dynamic library). In that case the parentheses may be dropped::

        @keyword
        def func():
            # ...
    """
    if callable(name):
        # Bare ``@keyword`` usage: ``name`` is actually the decorated
        # function, so re-invoke ourselves with the defaults.
        return keyword()(name)

    def decorator(func):
        func.robot_name = name
        func.robot_tags = tags
        return func

    return decorator
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_RENDERERS_NAMESPACE_RENDERER_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_RENDERERS_NAMESPACE_RENDERER_H_
#include <vector>
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
// Renderer that emits the namespace scope(s) wrapping generated ops code,
// as configured by the RendererContext.
class NamespaceRenderer : public Renderer {
 public:
  explicit NamespaceRenderer(RendererContext context);

  // Open the namespace scope(s); each Open() must be paired with a Close().
  void Open();
  void Close();
};
} // namespace cpp
} // namespace generator
} // namespace tensorflow
#endif // TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_RENDERERS_NAMESPACE_RENDERER_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/c/experimental/ops/gen/cpp/renderers/namespace_renderer.h |
//! Logic for lowering higher-kinded outlives constraints
//! (with placeholders and universes) and turn them into regular
//! outlives constraints.
use rustc_data_structures::frozen::Frozen;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::graph::scc;
use rustc_data_structures::graph::scc::Sccs;
use rustc_index::IndexVec;
use rustc_infer::infer::RegionVariableOrigin;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::{RegionVid, UniverseIndex};
use tracing::{debug, trace};
use crate::constraints::{ConstraintSccIndex, OutlivesConstraintSet};
use crate::consumers::OutlivesConstraint;
use crate::diagnostics::UniverseInfo;
use crate::region_infer::values::{LivenessValues, PlaceholderIndices};
use crate::region_infer::{ConstraintSccs, RegionDefinition, Representative, TypeTest};
use crate::ty::VarianceDiagInfo;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::type_check::{Locations, MirTypeckRegionConstraints};
use crate::universal_regions::UniversalRegions;
use crate::{BorrowckInferCtxt, NllRegionVariableOrigin};
/// A set of outlives constraints after rewriting to remove
/// higher-kinded constraints.
pub(crate) struct LoweredConstraints<'tcx> {
    /// SCCs of the (possibly rewritten) constraint graph.
    pub(crate) constraint_sccs: Sccs<RegionVid, ConstraintSccIndex>,
    /// Per-region definitions (origin, universe, external name).
    pub(crate) definitions: Frozen<IndexVec<RegionVid, RegionDefinition<'tcx>>>,
    /// One `RegionTracker` annotation per SCC.
    pub(crate) scc_annotations: IndexVec<ConstraintSccIndex, RegionTracker>,
    /// The constraint set, possibly extended with `r: 'static` edges.
    pub(crate) outlives_constraints: Frozen<OutlivesConstraintSet<'tcx>>,
    pub(crate) type_tests: Vec<TypeTest<'tcx>>,
    pub(crate) liveness_constraints: LivenessValues,
    pub(crate) universe_causes: FxIndexMap<UniverseIndex, UniverseInfo<'tcx>>,
    pub(crate) placeholder_indices: PlaceholderIndices<'tcx>,
}
impl<'d, 'tcx, A: scc::Annotation> SccAnnotations<'d, 'tcx, A> {
    /// Create an empty annotation collection over the given region definitions.
    pub(crate) fn init(definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>) -> Self {
        Self { scc_to_annotation: IndexVec::new(), definitions }
    }
}
/// A Visitor for SCC annotation construction.
pub(crate) struct SccAnnotations<'d, 'tcx, A: scc::Annotation> {
    /// Maps each SCC index to its computed annotation.
    pub(crate) scc_to_annotation: IndexVec<ConstraintSccIndex, A>,
    definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>,
}
impl scc::Annotations<RegionVid> for SccAnnotations<'_, '_, RegionTracker> {
    /// Seed annotation for a single region, from its definition.
    fn new(&self, element: RegionVid) -> RegionTracker {
        RegionTracker::new(element, &self.definitions[element])
    }

    fn annotate_scc(&mut self, scc: ConstraintSccIndex, annotation: RegionTracker) {
        let idx = self.scc_to_annotation.push(annotation);
        // SCCs are visited in index order, so the pushed slot must match.
        assert!(idx == scc);
    }

    type Ann = RegionTracker;
    type SccIdx = ConstraintSccIndex;
}
#[derive(Copy, Debug, Clone, PartialEq, Eq)]
struct PlaceholderReachability {
    /// The largest-universed placeholder we can reach,
    /// paired with the region variable witnessing it.
    max_universe: (UniverseIndex, RegionVid),

    /// The placeholder with the smallest ID
    min_placeholder: RegionVid,

    /// The placeholder with the largest ID
    max_placeholder: RegionVid,
}
impl PlaceholderReachability {
    /// Merge the reachable placeholders of two graph components.
    /// Componentwise lattice join: largest universe, min/max placeholder IDs.
    fn merge(&mut self, other: &Self) {
        self.max_universe = self.max_universe.max(other.max_universe);
        self.min_placeholder = self.min_placeholder.min(other.min_placeholder);
        self.max_placeholder = self.max_placeholder.max(other.max_placeholder);
    }
}
/// An annotation for region graph SCCs that tracks
/// the values of its elements. This annotates a single SCC.
#[derive(Copy, Debug, Clone)]
pub(crate) struct RegionTracker {
    /// Placeholders reachable from this SCC; `None` when no placeholder
    /// has been reached (non-placeholder regions start out as `None`).
    reachable_placeholders: Option<PlaceholderReachability>,

    /// The largest universe nameable from this SCC.
    /// It is the smallest nameable universes of all
    /// existential regions reachable from it. Small Rvids are preferred.
    max_nameable_universe: (UniverseIndex, RegionVid),

    /// The representative Region Variable Id for this SCC.
    pub(crate) representative: Representative,
}
impl RegionTracker {
    /// Create the per-region seed annotation. Placeholder regions start
    /// out reaching themselves; all other regions reach no placeholder.
    pub(crate) fn new(rvid: RegionVid, definition: &RegionDefinition<'_>) -> Self {
        let reachable_placeholders =
            if matches!(definition.origin, NllRegionVariableOrigin::Placeholder(_)) {
                Some(PlaceholderReachability {
                    max_universe: (definition.universe, rvid),
                    min_placeholder: rvid,
                    max_placeholder: rvid,
                })
            } else {
                None
            };

        Self {
            reachable_placeholders,
            max_nameable_universe: (definition.universe, rvid),
            representative: Representative::new(rvid, definition),
        }
    }

    /// The largest universe this SCC can name. It's the smallest
    /// largest nameable universe of any reachable region, or
    /// `max_nameable(r) = min (max_nameable(r') for r' reachable from r)`
    pub(crate) fn max_nameable_universe(self) -> UniverseIndex {
        self.max_nameable_universe.0
    }

    /// Largest universe of any reachable placeholder, or `ROOT` if none.
    pub(crate) fn max_placeholder_universe_reached(self) -> UniverseIndex {
        self.reachable_placeholders.map(|pls| pls.max_universe.0).unwrap_or(UniverseIndex::ROOT)
    }

    /// Can all reachable placeholders be named from `from`?
    /// True vacuously in case no placeholders were reached.
    fn placeholders_can_be_named_by(&self, from: UniverseIndex) -> bool {
        self.reachable_placeholders.is_none_or(|pls| from.can_name(pls.max_universe.0))
    }

    /// Determine if we can name all the placeholders in `other`.
    pub(crate) fn can_name_all_placeholders(&self, other: Self) -> bool {
        // HACK: We first check whether we can name the highest existential universe
        // of `other`. This only exists to avoid errors in case that scc already
        // depends on a placeholder it cannot name itself.
        self.max_nameable_universe().can_name(other.max_nameable_universe())
            || other.placeholders_can_be_named_by(self.max_nameable_universe.0)
    }

    /// If this SCC reaches a placeholder it can't name, return it as
    /// (universe, witnessing region variable).
    fn unnameable_placeholder(&self) -> Option<(UniverseIndex, RegionVid)> {
        self.reachable_placeholders
            .filter(|pls| !self.max_nameable_universe().can_name(pls.max_universe.0))
            .map(|pls| pls.max_universe)
    }
}
impl scc::Annotation for RegionTracker {
    /// Merge the annotation of another member of the same SCC into ours.
    fn update_scc(&mut self, other: &Self) {
        trace!("{:?} << {:?}", self.representative, other.representative);
        self.representative = self.representative.min(other.representative);
        self.update_reachable(other);
    }

    /// Fold in reachability information from a reachable component.
    fn update_reachable(&mut self, other: &Self) {
        self.max_nameable_universe = self.max_nameable_universe.min(other.max_nameable_universe);

        match (self.reachable_placeholders.as_mut(), other.reachable_placeholders.as_ref()) {
            // Nothing new reachable through `other`.
            (None, None) | (Some(_), None) => (),
            (None, Some(theirs)) => self.reachable_placeholders = Some(*theirs),
            (Some(ours), Some(theirs)) => ours.merge(theirs),
        };
    }
}
/// Determines if the region variable definitions contain
/// placeholders, and compute them for later use.
// FIXME: This is also used by opaque type handling. Move it to a separate file.
pub(super) fn region_definitions<'tcx>(
    infcx: &BorrowckInferCtxt<'tcx>,
    universal_regions: &UniversalRegions<'tcx>,
) -> (Frozen<IndexVec<RegionVid, RegionDefinition<'tcx>>>, bool) {
    let var_infos = infcx.get_region_var_infos();
    // Create a RegionDefinition for each inference variable. This happens here because
    // it allows us to sneak in a cheap check for placeholders. Otherwise, its proper home
    // is in `RegionInferenceContext::new()`, probably.
    let mut definitions = IndexVec::with_capacity(var_infos.len());
    let mut has_placeholders = false;

    for info in var_infos.iter() {
        let origin = match info.origin {
            RegionVariableOrigin::Nll(origin) => origin,
            // Non-NLL variables are treated as unnamed existentials.
            _ => NllRegionVariableOrigin::Existential { name: None },
        };

        let definition = RegionDefinition { origin, universe: info.universe, external_name: None };

        has_placeholders |= matches!(origin, NllRegionVariableOrigin::Placeholder(_));
        definitions.push(definition);
    }

    // Add external names from universal regions in function definitions.
    // FIXME: this two-step method is annoying, but I don't know how to avoid it.
    for (external_name, variable) in universal_regions.named_universal_regions_iter() {
        debug!("region {:?} has external name {:?}", variable, external_name);
        definitions[variable].external_name = Some(external_name);
    }

    (Frozen::freeze(definitions), has_placeholders)
}
/// This method handles placeholders by rewriting the constraint
/// graph. For each strongly connected component in the constraint
/// graph such that there is a series of constraints
/// A: B: C: ... : X where
/// A contains a placeholder whose universe cannot be named by X,
/// add a constraint that A: 'static. This is a safe upper bound
/// in the face of borrow checker/trait solver limitations that will
/// eventually go away.
///
/// For a more precise definition, see the documentation for
/// [`RegionTracker`] and its methods!
///
/// This edge case used to be handled during constraint propagation.
/// It was rewritten as part of the Polonius project with the goal of moving
/// higher-kindedness concerns out of the path of the borrow checker,
/// for two reasons:
///
/// 1. Implementing Polonius is difficult enough without also
/// handling them.
/// 2. The long-term goal is to handle higher-kinded concerns
/// in the trait solver, where they belong. This avoids
/// logic duplication and allows future trait solvers
/// to compute better bounds than for example our
/// "must outlive 'static" here.
///
/// This code is a stop-gap measure in preparation for the future trait solver.
///
/// Every constraint added by this method is an internal `IllegalUniverse` constraint.
pub(crate) fn compute_sccs_applying_placeholder_outlives_constraints<'tcx>(
    constraints: MirTypeckRegionConstraints<'tcx>,
    universal_region_relations: &Frozen<UniversalRegionRelations<'tcx>>,
    infcx: &BorrowckInferCtxt<'tcx>,
) -> LoweredConstraints<'tcx> {
    let universal_regions = &universal_region_relations.universal_regions;
    let (definitions, has_placeholders) = region_definitions(infcx, universal_regions);

    let MirTypeckRegionConstraints {
        placeholder_indices,
        placeholder_index_to_region: _,
        liveness_constraints,
        mut outlives_constraints,
        universe_causes,
        type_tests,
    } = constraints;

    let fr_static = universal_regions.fr_static;

    // Helper: compute SCCs of the current constraint graph, filling
    // `annotations` with one `RegionTracker` per SCC.
    let compute_sccs =
        |constraints: &OutlivesConstraintSet<'tcx>,
         annotations: &mut SccAnnotations<'_, 'tcx, RegionTracker>| {
            ConstraintSccs::new_with_annotation(
                &constraints.graph(definitions.len()).region_graph(constraints, fr_static),
                annotations,
            )
        };

    let mut scc_annotations = SccAnnotations::init(&definitions);
    let constraint_sccs = compute_sccs(&outlives_constraints, &mut scc_annotations);

    // This code structure is a bit convoluted because it allows for a planned
    // future change where the early return here has a different type of annotation
    // that does much less work.
    if !has_placeholders {
        debug!("No placeholder regions found; skipping rewriting logic!");

        return LoweredConstraints {
            type_tests,
            constraint_sccs,
            scc_annotations: scc_annotations.scc_to_annotation,
            definitions,
            outlives_constraints: Frozen::freeze(outlives_constraints),
            liveness_constraints,
            universe_causes,
            placeholder_indices,
        };
    }
    debug!("Placeholders present; activating placeholder handling logic!");

    let added_constraints = rewrite_placeholder_outlives(
        &constraint_sccs,
        &scc_annotations,
        fr_static,
        &mut outlives_constraints,
    );

    let (constraint_sccs, scc_annotations) = if added_constraints {
        let mut annotations = SccAnnotations::init(&definitions);
        // We changed the constraint set and so must recompute SCCs.
        // Optimisation opportunity: if we can add them incrementally (and that's
        // possible because edges to 'static always only merge SCCs into 'static),
        // we would potentially save a lot of work here.
        (compute_sccs(&outlives_constraints, &mut annotations), annotations.scc_to_annotation)
    } else {
        // If we didn't add any back-edges; no more work needs doing
        debug!("No constraints rewritten!");
        (constraint_sccs, scc_annotations.scc_to_annotation)
    };

    LoweredConstraints {
        constraint_sccs,
        definitions,
        scc_annotations,
        outlives_constraints: Frozen::freeze(outlives_constraints),
        type_tests,
        liveness_constraints,
        universe_causes,
        placeholder_indices,
    }
}
/// For every SCC that reaches a placeholder whose universe it cannot name,
/// push a `representative: 'static` outlives constraint. Returns `true`
/// if any constraint was added, in which case the caller must recompute
/// the SCCs.
pub(crate) fn rewrite_placeholder_outlives<'tcx>(
    sccs: &Sccs<RegionVid, ConstraintSccIndex>,
    annotations: &SccAnnotations<'_, '_, RegionTracker>,
    fr_static: RegionVid,
    outlives_constraints: &mut OutlivesConstraintSet<'tcx>,
) -> bool {
    // Changed to `true` if we added any constraints and need to
    // recompute SCCs.
    let mut added_constraints = false;

    let annotations = &annotations.scc_to_annotation;

    for scc in sccs.all_sccs() {
        // No point in adding 'static: 'static!
        // This micro-optimisation makes somewhat sense
        // because static outlives *everything*.
        if scc == sccs.scc(fr_static) {
            continue;
        }

        let annotation = annotations[scc];

        let Some((max_u, max_u_rvid)) = annotation.unnameable_placeholder() else {
            // Every reachable placeholder is nameable; nothing to rewrite.
            continue;
        };

        debug!(
            "Placeholder universe {max_u:?} is too large for its SCC, represented by {:?}",
            annotation.representative
        );

        // We only add one `r: 'static` constraint per SCC, where `r` is the SCC representative.
        // That constraint is annotated with some placeholder `unnameable` where
        // `unnameable` is unnameable from `r` and there is a path in the constraint graph
        // between them.
        //
        // There is one exception; if some other region in this SCC can't name `'r`, then
        // we pick the region with the smallest universe in the SCC, so that a path can
        // always start in `'r` to find a motivation that isn't cyclic.
        let blame_to = if annotation.representative.rvid() == max_u_rvid {
            // Assertion: the region that lowered our universe is an existential one and we are a placeholder!
            // The SCC's representative is not nameable from some region
            // that ends up in the SCC.
            let small_universed_rvid = annotation.max_nameable_universe.1;
            debug!(
                "{small_universed_rvid:?} lowered our universe to {:?}",
                annotation.max_nameable_universe()
            );
            small_universed_rvid
        } else {
            // `max_u_rvid` is not nameable by the SCC's representative.
            max_u_rvid
        };

        // FIXME: if we can extract a useful blame span here, future error
        // reporting and constraint search can be simplified.
        added_constraints = true;
        outlives_constraints.push(OutlivesConstraint {
            sup: annotation.representative.rvid(),
            sub: fr_static,
            category: ConstraintCategory::OutlivesUnnameablePlaceholder(blame_to),
            locations: Locations::All(rustc_span::DUMMY_SP),
            span: rustc_span::DUMMY_SP,
            variance_info: VarianceDiagInfo::None,
            from_closure: false,
        });
    }
    added_constraints
}
##########################################################
# THIS IS A GENERATED FILE -- DO NOT MODIFY.
# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
# AND REGENERATE THE MATRIX SUITES.
#
# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/sharding_max_mirroring_opportunistic_secondary_targeting.yml
# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
##########################################################
description:
This suite is a combination of sharding_max_mirroring, and sharding_opportunistic_secondary_targeting.
  You can run any of these tests individually to debug any issues that might arise.
executor:
config:
shell_options:
global_vars:
TestData:
setParameters:
mirrorReads: "{samplingRate: 1.0}"
setParametersMongos:
opportunisticSecondaryTargeting: true
nodb: ""
matrix_suite: true
selector:
exclude_files:
- jstests/sharding/**/libs/**/*.js
- jstests/sharding/query/map_reduce/mrShardedOutput.js
roots:
- jstests/sharding/**/*.js
test_kind: js_test | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_max_mirroring_opportunistic_secondary_targeting.yml |
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
class InputAudio(TypedDict, total=False):
    """One audio input: base64-encoded bytes plus their container format."""

    data: Required[str]
    """Base64 encoded audio data."""

    format: Required[Literal["wav", "mp3"]]
    """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
    """Learn about [audio inputs](https://platform.openai.com/docs/guides/audio)."""

    input_audio: Required[InputAudio]  # the audio payload (data + format)

    type: Required[Literal["input_audio"]]
    """The type of the content part. Always `input_audio`."""
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005-2012, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
//
// class Thread -- implementation for
// platforms that support Posix threads
//
//-----------------------------------------------------------------------------
#include "IlmBaseConfig.h"
#if HAVE_PTHREAD
#ifdef ILMBASE_FORCE_CXX03
#include "IlmThread.h"
#include "Iex.h"
#include <assert.h>
extern "C"
{
typedef void * (* Start) (void *);
}
ILMTHREAD_INTERNAL_NAMESPACE_SOURCE_ENTER
// This translation unit is only compiled when HAVE_PTHREAD is set,
// so thread support is unconditionally available.
bool
supportsThreads ()
{
    return true;
}
namespace {

// Thread entry point handed to pthread_create: forwards to Thread::run().
// NOTE(review): declared as returning void but cast to a void *(*)(void *)
// via `Start` at the call site in Thread::start(); this works on the
// supported platforms but is not strictly conforming C++.
void
threadLoop (void * t)
{
    return (reinterpret_cast<Thread*>(t))->run();
}

} // namespace
Thread::Thread ()
{
    // empty -- the underlying pthread is created lazily in start().
}
Thread::~Thread ()
{
    // Wait for the worker thread to finish before destroying the object.
    // NOTE(review): if start() was never called, _thread is uninitialized
    // and this join is undefined behavior -- callers are expected to
    // start() every Thread they construct; confirm against call sites.
    int error = ::pthread_join (_thread, 0);
    assert (error == 0);
}
// Launch the worker thread; threadLoop() invokes this->run() on it.
// Throws an Iex errno exception if the thread cannot be created.
void
Thread::start ()
{
    if (int error = ::pthread_create (&_thread, 0, Start (threadLoop), this))
        IEX_NAMESPACE::throwErrnoExc ("Cannot create new thread (%T).", error);
}
ILMTHREAD_INTERNAL_NAMESPACE_SOURCE_EXIT
#endif
#endif | cpp | github | https://github.com/opencv/opencv | 3rdparty/openexr/IlmThread/IlmThreadPosix.cpp |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QLabel, QGridLayout
from PyQt5.QtWidgets import QLineEdit, QPushButton, QHBoxLayout
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import Qt
class Kalkulator(QWidget):
    """A simple four-function calculator window."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.interfejs()

    def interfejs(self):
        """Build and lay out all widgets."""
        # Column labels
        etykieta1 = QLabel("Liczba 1:", self)
        etykieta2 = QLabel("Liczba 2:", self)
        etykieta3 = QLabel("Wynik:", self)

        # Grid layout holding the labels and edit fields
        ukladT = QGridLayout()
        ukladT.addWidget(etykieta1, 0, 0)
        ukladT.addWidget(etykieta2, 0, 1)
        ukladT.addWidget(etykieta3, 0, 2)

        # Single-line edit fields
        self.liczba1Edt = QLineEdit()
        self.liczba2Edt = QLineEdit()
        self.wynikEdt = QLineEdit()
        # BUGFIX: the original assigned to a non-existent ``readonly``
        # attribute, which had no effect; use the Qt setter instead.
        self.wynikEdt.setReadOnly(True)
        self.wynikEdt.setToolTip('Wpisz <b>liczby</b> i wybierz działanie...')

        ukladT.addWidget(self.liczba1Edt, 1, 0)
        ukladT.addWidget(self.liczba2Edt, 1, 1)
        ukladT.addWidget(self.wynikEdt, 1, 2)

        # Operation buttons. (The original had mnozBtn/dzielBtn swapped
        # relative to their labels; names now match the labels.)
        dodajBtn = QPushButton("&Dodaj", self)
        odejmijBtn = QPushButton("&Odejmij", self)
        mnozBtn = QPushButton("&Mnóż", self)
        dzielBtn = QPushButton("D&ziel", self)
        koniecBtn = QPushButton("&Koniec", self)
        koniecBtn.resize(koniecBtn.sizeHint())

        ukladH = QHBoxLayout()
        ukladH.addWidget(dodajBtn)
        ukladH.addWidget(odejmijBtn)
        ukladH.addWidget(mnozBtn)
        ukladH.addWidget(dzielBtn)

        ukladT.addLayout(ukladH, 2, 0, 1, 3)
        ukladT.addWidget(koniecBtn, 3, 0, 1, 3)

        # Attach the assembled layout to the window
        self.setLayout(ukladT)

        koniecBtn.clicked.connect(self.koniec)
        dodajBtn.clicked.connect(self.dzialanie)
        odejmijBtn.clicked.connect(self.dzialanie)
        mnozBtn.clicked.connect(self.dzialanie)
        dzielBtn.clicked.connect(self.dzialanie)

        self.setGeometry(20, 20, 300, 100)
        self.setWindowIcon(QIcon('kalkulator.png'))
        self.setWindowTitle("Prosty kalkulator")
        self.show()

    def koniec(self):
        """Close the window (triggers the closeEvent confirmation)."""
        self.close()

    def closeEvent(self, event):
        """Ask for confirmation before the window closes."""
        odp = QMessageBox.question(
            self, 'Komunikat',
            "Czy na pewno koniec?",
            QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if odp == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def keyPressEvent(self, e):
        """Allow closing the window with the Escape key."""
        if e.key() == Qt.Key_Escape:
            self.close()

    def dzialanie(self):
        """Read both numbers and apply the operation of the clicked button.

        The original implementation only handled addition; subtraction,
        multiplication and division (with a divide-by-zero warning) are
        now implemented as well.
        """
        nadawca = self.sender()
        try:
            liczba1 = float(self.liczba1Edt.text())
            liczba2 = float(self.liczba2Edt.text())
        except ValueError:
            QMessageBox.warning(self, "Błąd", "Błędne dane", QMessageBox.Ok)
            return

        operacja = nadawca.text()
        if operacja == "&Dodaj":
            wynik = liczba1 + liczba2
        elif operacja == "&Odejmij":
            wynik = liczba1 - liczba2
        elif operacja == "&Mnóż":
            wynik = liczba1 * liczba2
        elif operacja == "D&ziel":
            if liczba2 == 0:
                QMessageBox.warning(
                    self, "Błąd", "Dzielenie przez zero!", QMessageBox.Ok)
                return
            wynik = liczba1 / liczba2
        else:
            return

        self.wynikEdt.setText(str(wynik))
# Run the calculator as a standalone application when executed directly.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    okno = Kalkulator()
    sys.exit(app.exec_())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.