code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Odoo addon manifest for the "Events Sales" module: metadata, dependencies,
# data/demo/test files.  (Fixed: stray dataset-extraction residue fused to the
# closing brace made the dict literal a syntax error.)
{
    'name': 'Events Sales',
    'version': '1.1',
    'category': 'Tools',
    'website': 'https://www.odoo.com/page/events',
    'description': """
Creating registration with sale orders.
=======================================
This module allows you to automate and connect your registration creation with
your main sale flow and therefore, to enable the invoicing feature of registrations.
It defines a new kind of service products that offers you the possibility to
choose an event category associated with it. When you encode a sale order for
that product, you will be able to choose an existing event of that category and
when you confirm your sale order it will automatically create a registration for
this event.
""",
    # Module dependencies: both the event manager and the sales flow.
    'depends': ['event', 'sale'],
    # Views, data records, report templates and access rules loaded on install.
    'data': [
        'views/event.xml',
        'views/product.xml',
        'views/sale_order.xml',
        'event_sale_data.xml',
        'report/event_event_templates.xml',
        'security/ir.model.access.csv',
        'wizard/event_edit_registration.xml',
    ],
    'demo': ['event_demo.xml'],
    'test': ['test/confirm.yml'],
    'installable': True,
    # Installed automatically as soon as both dependencies are present.
    'auto_install': True,
}
#! /usr/bin/env python
import sys
import os
import errno
import shared
from multiprocessing import Process
class singleinstance:
    """
    Implements a single instance application by creating a lock file at appdata.

    This is based upon the singleton class from tendo
    https://github.com/pycontribs/tendo which is under the Python Software
    Foundation License version 2.
    """

    def __init__(self, flavor_id="", daemon=False):
        """Acquire the per-application lock file, exiting if already held.

        flavor_id -- suffix allowing several independent locks per appdata dir
        daemon    -- when True, do not try to raise the running GUI instance
        """
        self.initialized = False
        self.daemon = daemon
        # One lock file per flavor, stored in the application data directory.
        self.lockfile = os.path.normpath(
            os.path.join(shared.appdata, 'singleton%s.lock' % flavor_id))

        if not self.daemon:
            # Tells the already running (if any) application to get focus.
            import bitmessageqt
            bitmessageqt.init()

        if sys.platform == 'win32':
            try:
                # A leftover file from an interrupted run is removed first;
                # O_CREAT|O_EXCL then guarantees exclusive ownership.
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                self.fd = os.open(self.lockfile,
                                  os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                # sys.exc_info() keeps this compatible with both Python 2 and 3
                # (was "type, e, tb = ..." which shadowed the builtin `type`).
                _, e, _ = sys.exc_info()
                if e.errno == errno.EACCES:
                    # Another process keeps the file open: instance running.
                    print('Another instance of this application is already running')
                    sys.exit(-1)
                print(e.errno)
                raise
        else:  # non Windows: use an advisory POSIX file lock instead
            import fcntl  # @UnresolvedImport
            self.fp = open(self.lockfile, 'w')
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                print('Another instance of this application is already running')
                sys.exit(-1)
        self.initialized = True

    def __del__(self):
        """Release the lock and delete the lock file at teardown."""
        if not self.initialized:
            return
        try:
            if sys.platform == 'win32':
                if hasattr(self, 'fd'):
                    os.close(self.fd)
                    os.unlink(self.lockfile)
            else:
                import fcntl  # @UnresolvedImport
                fcntl.lockf(self.fp, fcntl.LOCK_UN)
                if os.path.isfile(self.lockfile):
                    os.unlink(self.lockfile)
        except Exception:
            # Never propagate from a finalizer; original behavior exits.
            sys.exit(-1)
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
The package for the psi statistics computation
"""
import numpy as np
try:
from scipy import weave
def _psicomputations(variance, lengthscale, Z, variational_posterior):
    """
    Compute the psi statistics of the spike-and-slab variational posterior
    with a scipy.weave C kernel.

    Z - MxQ
    mu - NxQ
    S - NxQ
    gamma - NxQ

    Returns (psi0, psi1, psi2, psi2n) with shapes (N,), (N,M), (M,M), (N,M,M).
    """
    # here are the "statistics" for psi0, psi1 and psi2
    # Produced intermediate results:
    # _psi1 NxM
    mu = variational_posterior.mean
    S = variational_posterior.variance
    N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
    l2 = np.square(lengthscale)
    # Log of the normalisation denominators (1 + S/l2) and (1 + 2S/l2).
    log_denom1 = np.log(S/l2+1)
    log_denom2 = np.log(2*S/l2+1)
    # log(gamma) and log(1-gamma) of the binary ("spike") probabilities.
    log_gamma,log_gamma1 = variational_posterior.gamma_log_prob()
    variance = float(variance)
    # psi0 is constant: the kernel variance for every data point.
    psi0 = np.empty(N)
    psi0[:] = variance
    psi1 = np.empty((N,M))
    psi2n = np.empty((N,M,M))
    # Strip any Param wrappers down to plain ndarrays for weave/blitz.
    from ....util.misc import param_to_array
    S = param_to_array(S)
    mu = param_to_array(mu)
    Z = param_to_array(Z)
    support_code = """
    #include <math.h>
    """
    # C kernel: fills psi1 (NxM) and the n-symmetric psi2n (NxMxM); each
    # (n,m,q) factor is a log-sum-exp of the "slab" and "spike" branches.
    code = """
    for(int n=0; n<N; n++) {
        for(int m1=0;m1<M;m1++) {
            double log_psi1=0;
            for(int m2=0;m2<=m1;m2++) {
                double log_psi2_n=0;
                for(int q=0;q<Q;q++) {
                    double Snq = S(n,q);
                    double lq = l2(q);
                    double Zm1q = Z(m1,q);
                    double Zm2q = Z(m2,q);
                    if(m2==0) {
                        // Compute Psi_1
                        double muZ = mu(n,q)-Z(m1,q);
                        double psi1_exp1 = log_gamma(n,q) - (muZ*muZ/(Snq+lq) +log_denom1(n,q))/2.;
                        double psi1_exp2 = log_gamma1(n,q) -Zm1q*Zm1q/(2.*lq);
                        log_psi1 += (psi1_exp1>psi1_exp2)?psi1_exp1+log1p(exp(psi1_exp2-psi1_exp1)):psi1_exp2+log1p(exp(psi1_exp1-psi1_exp2));
                    }
                    // Compute Psi_2
                    double muZhat = mu(n,q) - (Zm1q+Zm2q)/2.;
                    double Z2 = Zm1q*Zm1q+ Zm2q*Zm2q;
                    double dZ = Zm1q - Zm2q;
                    double psi2_exp1 = dZ*dZ/(-4.*lq)-muZhat*muZhat/(2.*Snq+lq) - log_denom2(n,q)/2. + log_gamma(n,q);
                    double psi2_exp2 = log_gamma1(n,q) - Z2/(2.*lq);
                    log_psi2_n += (psi2_exp1>psi2_exp2)?psi2_exp1+log1p(exp(psi2_exp2-psi2_exp1)):psi2_exp2+log1p(exp(psi2_exp1-psi2_exp2));
                }
                double exp_psi2_n = exp(log_psi2_n);
                psi2n(n,m1,m2) = variance*variance*exp_psi2_n;
                if(m1!=m2) { psi2n(n,m2,m1) = variance*variance*exp_psi2_n;}
            }
            psi1(n,m1) = variance*exp(log_psi1);
        }
    }
    """
    weave.inline(code, support_code=support_code, arg_names=['psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','log_denom1','log_denom2','log_gamma','log_gamma1'], type_converters=weave.converters.blitz)
    # psi2 is psi2n summed over the data points.
    psi2 = psi2n.sum(axis=0)
    return psi0,psi1,psi2,psi2n
# Memoise the expensive weave computation; limit=1 keeps only the most
# recent (variance, lengthscale, Z, posterior) evaluation so the derivative
# pass below can reuse it for free.
from GPy.util.caching import Cacher
psicomputations = Cacher(_psicomputations, limit=1)
def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
    """
    Chain-rule gradients of the likelihood through the psi statistics,
    computed by a scipy.weave C kernel.

    Returns (dL_dvariance, dL_dlengthscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma).
    """
    ARD = (len(lengthscale)!=1)
    # Re-use the cached psi statistics (see the Cacher wrapper above).
    _,psi1,_,psi2n = psicomputations(variance, lengthscale, Z, variational_posterior)
    mu = variational_posterior.mean
    S = variational_posterior.variance
    N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
    l2 = np.square(lengthscale)
    log_denom1 = np.log(S/l2+1)
    log_denom2 = np.log(2*S/l2+1)
    log_gamma,log_gamma1 = variational_posterior.gamma_log_prob()
    gamma, gamma1 = variational_posterior.gamma_probabilities()
    variance = float(variance)
    # Gradient accumulators filled in-place by the C kernel.
    dvar = np.zeros(1)
    dmu = np.zeros((N,Q))
    dS = np.zeros((N,Q))
    dgamma = np.zeros((N,Q))
    dl = np.zeros(Q)
    dZ = np.zeros((M,Q))
    # psi0 equals the variance for every point, so its gradient is direct.
    dvar += np.sum(dL_dpsi0)
    from ....util.misc import param_to_array
    S = param_to_array(S)
    mu = param_to_array(mu)
    Z = param_to_array(Z)
    support_code = """
    #include <math.h>
    """
    # C kernel: accumulates all gradients; d_exp1/d_exp2 are the relative
    # weights of the slab/spike branches after a stable log-sum-exp split.
    code = """
    for(int n=0; n<N; n++) {
        for(int m1=0;m1<M;m1++) {
            double log_psi1=0;
            for(int m2=0;m2<M;m2++) {
                double log_psi2_n=0;
                for(int q=0;q<Q;q++) {
                    double Snq = S(n,q);
                    double lq = l2(q);
                    double Zm1q = Z(m1,q);
                    double Zm2q = Z(m2,q);
                    double gnq = gamma(n,q);
                    double g1nq = gamma1(n,q);
                    double mu_nq = mu(n,q);
                    if(m2==0) {
                        // Compute Psi_1
                        double lpsi1 = psi1(n,m1)*dL_dpsi1(n,m1);
                        if(q==0) {dvar(0) += lpsi1/variance;}
                        double Zmu = Zm1q - mu_nq;
                        double denom = Snq+lq;
                        double Zmu2_denom = Zmu*Zmu/denom;
                        double exp1 = log_gamma(n,q)-(Zmu*Zmu/(Snq+lq)+log_denom1(n,q))/(2.);
                        double exp2 = log_gamma1(n,q)-Zm1q*Zm1q/(2.*lq);
                        double d_exp1,d_exp2;
                        if(exp1>exp2) {
                            d_exp1 = 1.;
                            d_exp2 = exp(exp2-exp1);
                        } else {
                            d_exp1 = exp(exp1-exp2);
                            d_exp2 = 1.;
                        }
                        double exp_sum = d_exp1+d_exp2;
                        dmu(n,q) += lpsi1*Zmu*d_exp1/(denom*exp_sum);
                        dS(n,q) += lpsi1*(Zmu2_denom-1.)*d_exp1/(denom*exp_sum)/2.;
                        dgamma(n,q) += lpsi1*(d_exp1*g1nq-d_exp2*gnq)/exp_sum;
                        dl(q) += lpsi1*((Zmu2_denom+Snq/lq)/denom*d_exp1+Zm1q*Zm1q/(lq*lq)*d_exp2)/(2.*exp_sum);
                        dZ(m1,q) += lpsi1*(-Zmu/denom*d_exp1-Zm1q/lq*d_exp2)/exp_sum;
                    }
                    // Compute Psi_2
                    double lpsi2 = psi2n(n,m1,m2)*dL_dpsi2(m1,m2);
                    if(q==0) {dvar(0) += lpsi2*2/variance;}
                    double dZm1m2 = Zm1q - Zm2q;
                    double Z2 = Zm1q*Zm1q+Zm2q*Zm2q;
                    double muZhat = mu_nq - (Zm1q + Zm2q)/2.;
                    double denom = 2.*Snq+lq;
                    double muZhat2_denom = muZhat*muZhat/denom;
                    double exp1 = dZm1m2*dZm1m2/(-4.*lq)-muZhat*muZhat/(2.*Snq+lq) - log_denom2(n,q)/2. + log_gamma(n,q);
                    double exp2 = log_gamma1(n,q) - Z2/(2.*lq);
                    double d_exp1,d_exp2;
                    if(exp1>exp2) {
                        d_exp1 = 1.;
                        d_exp2 = exp(exp2-exp1);
                    } else {
                        d_exp1 = exp(exp1-exp2);
                        d_exp2 = 1.;
                    }
                    double exp_sum = d_exp1+d_exp2;
                    dmu(n,q) += -2.*lpsi2*muZhat/denom*d_exp1/exp_sum;
                    dS(n,q) += lpsi2*(2.*muZhat2_denom-1.)/denom*d_exp1/exp_sum;
                    dgamma(n,q) += lpsi2*(d_exp1*g1nq-d_exp2*gnq)/exp_sum;
                    dl(q) += lpsi2*(((Snq/lq+muZhat2_denom)/denom+dZm1m2*dZm1m2/(4.*lq*lq))*d_exp1+Z2/(2.*lq*lq)*d_exp2)/exp_sum;
                    dZ(m1,q) += 2.*lpsi2*((muZhat/denom-dZm1m2/(2*lq))*d_exp1-Zm1q/lq*d_exp2)/exp_sum;
                }
            }
        }
    }
    """
    weave.inline(code, support_code=support_code, arg_names=['dL_dpsi1','dL_dpsi2','psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','gamma','gamma1','log_denom1','log_denom2','log_gamma','log_gamma1','dvar','dl','dmu','dS','dgamma','dZ'], type_converters=weave.converters.blitz)
    # The kernel differentiates w.r.t. l2 = lengthscale**2; chain rule back.
    dl *= 2.*lengthscale
    if not ARD:
        dl = dl.sum()
    return dvar, dl, dZ, dmu, dS, dgamma
except:
def psicomputations(variance, lengthscale, Z, variational_posterior):
    """
    Pure-numpy fallback computing the psi statistics (used when scipy.weave
    is unavailable).

    Z - MxQ
    mu - NxQ
    S - NxQ
    gamma - NxQ

    Returns (psi0, psi1, psi2).
    """
    post_mean = variational_posterior.mean
    post_var = variational_posterior.variance
    slab_prob = variational_posterior.binary_prob
    # psi0 is constant: the kernel variance replicated once per data point.
    psi0 = np.full(post_mean.shape[0], float(variance))
    psi1 = _psi1computations(variance, lengthscale, Z, post_mean, post_var, slab_prob)
    psi2 = _psi2computations(variance, lengthscale, Z, post_mean, post_var, slab_prob)
    return psi0, psi1, psi2
def _psi1computations(variance, lengthscale, Z, mu, S, gamma):
"""
Z - MxQ
mu - NxQ
S - NxQ
gamma - NxQ
"""
# here are the "statistics" for psi1
# Produced intermediate results:
# _psi1 NxM
lengthscale2 = np.square(lengthscale)
# psi1
_psi1_denom = S[:, None, :] / lengthscale2 + 1. # Nx1xQ
_psi1_denom_sqrt = np.sqrt(_psi1_denom) #Nx1xQ
_psi1_dist = Z[None, :, :] - mu[:, None, :] # NxMxQ
_psi1_dist_sq = np.square(_psi1_dist) / (lengthscale2 * _psi1_denom) # NxMxQ
_psi1_common = gamma[:,None,:] / (lengthscale2*_psi1_denom*_psi1_denom_sqrt) #Nx1xQ
_psi1_exponent1 = np.log(gamma[:,None,:]) - (_psi1_dist_sq + np.log(_psi1_denom))/2. # NxMxQ
_psi1_exponent2 = np.log(1.-gamma[:,None,:]) - (np.square(Z[None,:,:])/lengthscale2)/2. # NxMxQ
_psi1_exponent_max = np.maximum(_psi1_exponent1,_psi1_exponent2)
_psi1_exponent = _psi1_exponent_max+np.log(np.exp(_psi1_exponent1-_psi1_exponent_max) + np.exp(_psi1_exponent2-_psi1_exponent_max)) #NxMxQ
_psi1_exp_sum = _psi1_exponent.sum(axis=-1) #NxM
_psi1 = variance * np.exp(_psi1_exp_sum) # NxM
return _psi1
def _psi2computations(variance, lengthscale, Z, mu, S, gamma):
"""
Z - MxQ
mu - NxQ
S - NxQ
gamma - NxQ
"""
# here are the "statistics" for psi2
# Produced intermediate results:
# _psi2 MxM
lengthscale2 = np.square(lengthscale)
_psi2_Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q
_psi2_Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q
_psi2_Zdist_sq = np.square(_psi2_Zdist / lengthscale) # M,M,Q
_psi2_Z_sq_sum = (np.square(Z[:,None,:])+np.square(Z[None,:,:]))/lengthscale2 # MxMxQ
# psi2
_psi2_denom = 2.*S[:, None, None, :] / lengthscale2 + 1. # Nx1x1xQ
_psi2_denom_sqrt = np.sqrt(_psi2_denom)
_psi2_mudist = mu[:,None,None,:]-_psi2_Zhat #N,M,M,Q
_psi2_mudist_sq = np.square(_psi2_mudist)/(lengthscale2*_psi2_denom)
_psi2_common = gamma[:,None,None,:]/(lengthscale2 * _psi2_denom * _psi2_denom_sqrt) # Nx1x1xQ
_psi2_exponent1 = -_psi2_Zdist_sq -_psi2_mudist_sq -0.5*np.log(_psi2_denom)+np.log(gamma[:,None,None,:]) #N,M,M,Q
_psi2_exponent2 = np.log(1.-gamma[:,None,None,:]) - 0.5*(_psi2_Z_sq_sum) # NxMxMxQ
_psi2_exponent_max = np.maximum(_psi2_exponent1, _psi2_exponent2)
_psi2_exponent = _psi2_exponent_max+np.log(np.exp(_psi2_exponent1-_psi2_exponent_max) + np.exp(_psi2_exponent2-_psi2_exponent_max))
_psi2_exp_sum = _psi2_exponent.sum(axis=-1) #NxM
_psi2 = variance*variance * (np.exp(_psi2_exp_sum).sum(axis=0)) # MxM
return _psi2
def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
    """
    Pure-numpy fallback: chain-rule gradients of the likelihood w.r.t. kernel
    and variational parameters, combining the psi1 and psi2 contributions.

    Returns (dL_dvar, dL_dlengthscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma).
    """
    post_mean = variational_posterior.mean
    post_var = variational_posterior.variance
    slab_prob = variational_posterior.binary_prob
    dvar1, dl1, dZ1, dmu1, dS1, dgamma1 = _psi1compDer(
        dL_dpsi1, variance, lengthscale, Z, post_mean, post_var, slab_prob)
    dvar2, dl2, dZ2, dmu2, dS2, dgamma2 = _psi2compDer(
        dL_dpsi2, variance, lengthscale, Z, post_mean, post_var, slab_prob)
    # psi0 = variance per data point, so its gradient is a direct sum.
    dL_dvar = np.sum(dL_dpsi0) + dvar1 + dvar2
    dL_dlengscale = dl1 + dl2
    # Isotropic (non-ARD) kernel: collapse the per-dimension gradient.
    if len(lengthscale) == 1:
        dL_dlengscale = dL_dlengscale.sum()
    return (dL_dvar, dL_dlengscale, dZ1 + dZ2, dmu1 + dmu2,
            dS1 + dS2, dgamma1 + dgamma2)
def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S, gamma):
    """
    Pure-numpy gradients of the psi1 contribution.

    dL_dpsi1 - NxM
    Z - MxQ
    mu - NxQ
    S - NxQ
    gamma - NxQ

    Returns (_dL_dvariance, _dL_dlengthscale, _dL_dZ, _dL_dmu, _dL_dS,
    _dL_dgamma).
    """
    # here are the "statistics" for psi1
    # Produced intermediate results: dL_dparams w.r.t. psi1
    # _dL_dvariance 1
    # _dL_dlengthscale Q
    # _dL_dZ MxQ
    # _dL_dgamma NxQ
    # _dL_dmu NxQ
    # _dL_dS NxQ
    lengthscale2 = np.square(lengthscale)
    # psi1
    _psi1_denom = S / lengthscale2 + 1.  # NxQ
    _psi1_denom_sqrt = np.sqrt(_psi1_denom)  # NxQ
    _psi1_dist = Z[None, :, :] - mu[:, None, :]  # NxMxQ
    _psi1_dist_sq = np.square(_psi1_dist) / (lengthscale2 * _psi1_denom[:,None,:])  # NxMxQ
    _psi1_common = gamma / (lengthscale2*_psi1_denom*_psi1_denom_sqrt)  # NxQ
    # Log-domain "slab" and "spike" branches combined by a stable log-sum-exp.
    _psi1_exponent1 = np.log(gamma[:,None,:]) -0.5 * (_psi1_dist_sq + np.log(_psi1_denom[:, None,:]))  # NxMxQ
    _psi1_exponent2 = np.log(1.-gamma[:,None,:]) -0.5 * (np.square(Z[None,:,:])/lengthscale2)  # NxMxQ
    _psi1_exponent_max = np.maximum(_psi1_exponent1,_psi1_exponent2)
    _psi1_exponent = _psi1_exponent_max+np.log(np.exp(_psi1_exponent1-_psi1_exponent_max) + np.exp(_psi1_exponent2-_psi1_exponent_max))  # NxMxQ
    _psi1_exp_sum = _psi1_exponent.sum(axis=-1)  # NxM
    _psi1_exp_dist_sq = np.exp(-0.5*_psi1_dist_sq)  # NxMxQ
    _psi1_exp_Z = np.exp(-0.5*np.square(Z[None,:,:])/lengthscale2)  # 1xMxQ
    # psi1 with the q-th factor divided out (product rule over Q dimensions).
    _psi1_q = variance * np.exp(_psi1_exp_sum[:,:,None] - _psi1_exponent)  # NxMxQ
    _psi1 = variance * np.exp(_psi1_exp_sum)  # NxM
    # psi1 is linear in variance, hence the division below.
    _dL_dvariance = np.einsum('nm,nm->',dL_dpsi1, _psi1)/variance  # 1
    _dL_dgamma = np.einsum('nm,nmq,nmq->nq',dL_dpsi1, _psi1_q, (_psi1_exp_dist_sq/_psi1_denom_sqrt[:,None,:]-_psi1_exp_Z))  # NxQ
    _dL_dmu = np.einsum('nm, nmq, nmq, nmq, nq->nq',dL_dpsi1,_psi1_q,_psi1_exp_dist_sq,_psi1_dist,_psi1_common)  # NxQ
    _dL_dS = np.einsum('nm,nmq,nmq,nq,nmq->nq',dL_dpsi1,_psi1_q,_psi1_exp_dist_sq,_psi1_common,(_psi1_dist_sq-1.))/2.  # NxQ
    _dL_dZ = np.einsum('nm,nmq,nmq->mq',dL_dpsi1,_psi1_q, (- _psi1_common[:,None,:] * _psi1_dist * _psi1_exp_dist_sq - (1-gamma[:,None,:])/lengthscale2*Z[None,:,:]*_psi1_exp_Z))
    # Leading lengthscale factor: chain rule through lengthscale2 = l**2.
    _dL_dlengthscale = lengthscale* np.einsum('nm,nmq,nmq->q',dL_dpsi1,_psi1_q,(_psi1_common[:,None,:]*(S[:,None,:]/lengthscale2+_psi1_dist_sq)*_psi1_exp_dist_sq + (1-gamma[:,None,:])*np.square(Z[None,:,:]/lengthscale2)*_psi1_exp_Z))
    return _dL_dvariance, _dL_dlengthscale, _dL_dZ, _dL_dmu, _dL_dS, _dL_dgamma
def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S, gamma):
"""
Z - MxQ
mu - NxQ
S - NxQ
gamma - NxQ
dL_dpsi2 - MxM
"""
# here are the "statistics" for psi2
# Produced the derivatives w.r.t. psi2:
# _dL_dvariance 1
# _dL_dlengthscale Q
# _dL_dZ MxQ
# _dL_dgamma NxQ
# _dL_dmu NxQ
# _dL_dS NxQ
lengthscale2 = np.square(lengthscale)
_psi2_Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q
_psi2_Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q
_psi2_Zdist_sq = np.square(_psi2_Zdist / lengthscale) # M,M,Q
_psi2_Z_sq_sum = (np.square(Z[:,None,:])+np.square(Z[None,:,:]))/lengthscale2 # MxMxQ
# psi2
_psi2_denom = 2.*S / lengthscale2 + 1. # NxQ
_psi2_denom_sqrt = np.sqrt(_psi2_denom)
_psi2_mudist = mu[:,None,None,:]-_psi2_Zhat #N,M,M,Q
_psi2_mudist_sq = np.square(_psi2_mudist)/(lengthscale2*_psi2_denom[:,None,None,:])
_psi2_common = gamma/(lengthscale2 * _psi2_denom * _psi2_denom_sqrt) # NxQ
_psi2_exponent1 = -_psi2_Zdist_sq -_psi2_mudist_sq -0.5*np.log(_psi2_denom[:,None,None,:])+np.log(gamma[:,None,None,:]) #N,M,M,Q
_psi2_exponent2 = np.log(1.-gamma[:,None,None,:]) - 0.5*(_psi2_Z_sq_sum) # NxMxMxQ
_psi2_exponent_max = np.maximum(_psi2_exponent1, _psi2_exponent2)
_psi2_exponent = _psi2_exponent_max+np.log(np.exp(_psi2_exponent1-_psi2_exponent_max) + np.exp(_psi2_exponent2-_psi2_exponent_max))
_psi2_exp_sum = _psi2_exponent.sum(axis=-1) #NxM
_psi2_q = variance*variance * np.exp(_psi2_exp_sum[:,:,:,None]-_psi2_exponent) # NxMxMxQ
_psi2_exp_dist_sq = np.exp(-_psi2_Zdist_sq -_psi2_mudist_sq) # NxMxMxQ
_psi2_exp_Z = np.exp(-0.5*_psi2_Z_sq_sum) # MxMxQ
_psi2 = variance*variance * (np.exp(_psi2_exp_sum).sum(axis=0)) # MxM
_dL_dvariance = np.einsum('mo,mo->',dL_dpsi2,_psi2)*2./variance
_dL_dgamma = np.einsum('mo,nmoq,nmoq->nq',dL_dpsi2,_psi2_q,(_psi2_exp_dist_sq/_psi2_denom_sqrt[:,None,None,:] - _psi2_exp_Z))
_dL_dmu = -2.*np.einsum('mo,nmoq,nq,nmoq,nmoq->nq',dL_dpsi2,_psi2_q,_psi2_common,_psi2_mudist,_psi2_exp_dist_sq)
_dL_dS = np.einsum('mo,nmoq,nq,nmoq,nmoq->nq',dL_dpsi2,_psi2_q, _psi2_common, (2.*_psi2_mudist_sq-1.), _psi2_exp_dist_sq)
_dL_dZ = 2.*np.einsum('mo,nmoq,nmoq->mq',dL_dpsi2,_psi2_q,(_psi2_common[:,None,None,:]*(-_psi2_Zdist*_psi2_denom[:,None,None,:]+_psi2_mudist)*_psi2_exp_dist_sq - (1-gamma[:,None,None,:])*Z[:,None,:]/lengthscale2*_psi2_exp_Z))
_dL_dlengthscale = 2.*lengthscale* np.einsum('mo,nmoq,nmoq->q',dL_dpsi2,_psi2_q,(_psi2_common[:,None,None,:]*(S[:,None,None,:]/lengthscale2+_psi2_Zdist_sq*_psi2_denom[:,None,None,:]+_psi2_mudist_sq)*_psi2_exp_dist_sq+(1-gamma[:,None,None,:])*_psi2_Z_sq_sum*0.5/lengthscale2*_psi2_exp_Z))
return _dL_dvariance, _dL_dlengthscale, _dL_dZ, _dL_dmu, _dL_dS, _dL_dgamma | unknown | codeparrot/codeparrot-clean | ||
# -*-coding:Utf-8 -*
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# External libraries
# ----------------------------------------------------------------------
import sys
import os
# ----------------------------------------------------------------------
# Packages
# ----------------------------------------------------------------------
import os
import sys
import copy
import emcee
import pickle
import glob
import shutil
import datetime
import importlib
import subprocess
import numpy as np
from scipy import stats
from scipy import interpolate
from sklearn import linear_model
import muLAn.models as mulanmodels
import muLAn.packages.algebra as algebra
# ----------------------------------------------------------------------
# CLASS
# ----------------------------------------------------------------------
class printoption:
    """ANSI escape sequences used to colour and style terminal output."""
    # Foreground colours
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    # Text attributes (reset restores the default rendering)
    reset = '\033[0m'
    bright = '\033[1m'
    dim = '\033[2m'
    underscore = '\033[4m'
    blink = '\033[5m'
    reverse = '\033[7m'
    hidden = '\033[8m'
    # Semantic shortcuts used by the logging helpers below
    level0 = "\033[1m\033[31m"
    level1 = "\033[1m"
    good = "\033[32m"
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def help():
    """Return the one-line description of this module."""
    return "grid_dmcmc - Differential Markov Chains Monte Carlo."
# ----------------------------------------------------------------------
def bash_command(text):
    """Run *text* through /bin/bash and block until it terminates.

    NOTE(review): shell=True executes the string verbatim -- callers must
    never pass untrusted input here.
    """
    proc = subprocess.Popen(text, shell=True, executable="/bin/bash")
    proc.wait()
# ----------------------------------------------------------------------
# def update_progress(job_title, progress):
def update_progress(job_title, a, b):
    """Render a one-line progress bar for step *a* of *b* on stdout."""
    bar_len = 20
    frac = float(a) / float(b)
    filled = int(round(bar_len * frac))
    bar = "*" * filled + "-" * (bar_len - filled)
    msg = "\r {0}: [{1}] {2:3.0f}% --> {3:d} / {4:d}".format(
        job_title, bar, round(frac * 100, 2), a, b)
    # Append a green DONE marker once the task is complete.
    if frac >= 1:
        msg = msg + " \033[1m\033[32mDONE\033[0m\r\n"
    sys.stdout.write(msg)
    sys.stdout.flush()
# ----------------------------------------------------------------------
def update_progress_grid(a, b, c, d):
    """Render a combined grid (c/d) and MCMC (a/b) progress line on stdout."""
    bar_len = 10
    frac_mcmc = float(a) / float(b)
    frac_grid = float(c) / float(d)
    fill_mcmc = int(round(bar_len * frac_mcmc))
    fill_grid = int(round(bar_len * frac_grid))
    grid_bar = "*" * fill_grid + "-" * (bar_len - fill_grid)
    mcmc_bar = "*" * fill_mcmc + "-" * (bar_len - fill_mcmc)
    msg = ("\r Grid\033[34m+MCMC\033[0m: {2:4d} / {3:4d} <-- {4:3.0f}% [{0}]"
           "\033[34m[{1}] {5:3.0f}% --> {6:d} / {7:d}\033[0m\r").format(
        grid_bar, mcmc_bar, c, d,
        round(frac_grid * 100, 2), round(frac_mcmc * 100, 2), a, b)
    sys.stdout.write(msg)
    sys.stdout.flush()
# ----------------------------------------------------------------------
def communicate(cfg, verbose, text, opts=False, prefix=False, newline=False, tab=False):
    """Print *text*, honouring the configured verbosity and styling flags.

    The [muLAn] prefix and ANSI styling (*opts*) are applied only when the
    configured verbosity is at least *verbose*; indentation (*tab*) and the
    leading newline are applied in every case.  The message is always printed.
    """
    styled = cfg.getint('Modelling', 'Verbose') >= verbose
    if styled:
        if prefix:
            text = "[muLAn] " + text
        if opts != False:
            ansi = ''
            for code in opts:
                ansi = ansi + code
            text = ansi + text + printoption.reset
    if tab:
        text = " " + text
    if newline:
        text = "\n" + text
    print(text)
# ----------------------------------------------------------------------
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open ZipFile *ziph*."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            ziph.write(os.path.join(dirpath, fname))
# ----------------------------------------------------------------------
def combin(p):
    """Build the cartesian-product grid of the parameter value lists in *p*.

    p -- list of Np sequences; p[m] holds the candidate values of parameter m.

    Returns an (Np, Ng) float64 array, Ng = prod(len(p[m])), whose columns
    enumerate every combination of one value per parameter.
    """
    Np = len(p)
    Ng = 1
    for m in range(Np):
        Ng *= len(p[m])
    gridlist = np.zeros((Np, Ng), dtype='f8')
    Nr = Ng
    for m in range(Np):
        # Values of parameter m repeat in runs of length Nr.
        # '//' (was '/'): true division yields a float on Python 3 and would
        # break range() below.
        Nr = Nr // len(p[m])
        q = 0
        l = 0
        for k in range(Ng // Nr):
            for n in range(Nr):
                gridlist[m][q] = p[m][l % len(p[m])]
                q += 1
            # BUGFIX: advance the value index once per run (per k), not once
            # per element; otherwise columns repeat the same short cycle and
            # most parameter combinations are never generated.
            l += 1
    return gridlist
# ----------------------------------------------------------------------
def binrot(alpha, tau, beta, s, q):
    """Source position over the time, in the lens center-of-mass frame.

    The general conventions used are the same as in appendix A in [1].

    Arguments:
        alpha -- angle between the lens symmetry axis and the source
            trajectory;
        tau -- time component of the source position;
        beta -- component of the source position orthogonal to tau;
        s -- primary-secondary distance;
        q -- secondary-primary mass ratio.

    Returns:
        x, y -- numpy arrays with the source position relative to the lens
            center of mass (CM).

    References:
        [1] Skowron et al. 2011, 738, 87.
    """
    cos_a, sin_a = np.cos(alpha), np.sin(alpha)
    tau_hat = np.array([cos_a, sin_a])
    beta_hat = np.array([-sin_a, cos_a])
    # Source position in the primary-lens frame, one row per epoch.
    pos = np.array([t * tau_hat + b * beta_hat for t, b in zip(tau, beta)])
    # Shift the origin from the primary lens to the center of mass.
    pos = pos - s * q / (1 + q) * np.array([1, 0])
    return pos.T[0], pos.T[1]
# ----------------------------------------------------------------------
def test_blending(mb_lim, g_lim, fs, fb, time_serie, cond2):
g_mod = fb/fs
mb_mod = 18.0 - 2.5*np.log10(fs+fb)
# Blending ok
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
fs_new = fs
fb_new = fb
# Corners C1
cond = (g_lim[1] < g_mod) & (mb_lim[1] < mb_mod)
if cond:
fs_new = (10**((18.0-mb_lim[1])/2.5))/(1+g_lim[1])
fb_new = g_lim[1] * fs_new
# Corners C2
cond = (g_lim[1] < g_mod) & (mb_mod < mb_lim[0])
if cond:
fs_new = (10**(18.0-mb_lim[0]))/(1+g_lim[1])
fb_new = g_lim[1] * fs_new
# Corners C3
cond = (g_mod < g_lim[0]) & (mb_mod < mb_lim[0])
if cond:
fs_new = (10**(18.0-mb_lim[0]))/(1+g_lim[0])
fb_new = g_lim[0] * fs_new
# Corners C4
cond = (g_mod < g_lim[0]) & (mb_lim[1] < mb_mod)
if cond:
fs_new = (10**((18.0-mb_lim[1])/2.5))/(1+g_lim[0])
fb_new = g_lim[0] * fs_new
# Boundary B1
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_lim[1] < mb_mod)
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] - 1.0)
y = np.atleast_2d(time_serie['flux'][cond2] - 10 ** ((18.0 - mb_lim[1]) / 2.5))
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = 10 ** ((18.0 - mb_lim[1]) / 2.5) - fs_new
# Boundary B2
cond = (g_lim[1] < g_mod) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] + g_lim[1]).T
y = np.atleast_2d(time_serie['flux'][cond2]).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = g_lim[1]*fs_new
# Boundary B3
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_mod < mb_lim[0])
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] - 1.0)
y = np.atleast_2d(time_serie['flux'][cond2] - 10 ** ((18.0 - mb_lim[0]) / 2.5))
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = 10 ** ((18.0 - mb_lim[0]) / 2.5) - fs_new
# Boundary B4
cond = (g_mod < g_lim[0]) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] + g_lim[0]).T
y = np.atleast_2d(time_serie['flux'][cond2]).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = g_lim[0]*fs_new
return fs_new, fb_new
# ----------------------------------------------------------------------
# def sort_on_runtime(pos):
# print(pos)
# p = np.atleast_2d(pos)
# idx = np.argsort(p[:, 3])#[::-1]
# #print(idx)
# return p[idx], idx
# ----------------------------------------------------------------------
def lnprior(param_model):
    """Hard-box prior: 0 for admissible parameters, 1e12 (soft reject) otherwise.

    Rejects negative t0, rho outside [0, 1], tiny tE or q, and s outside
    (1e-10, 10).
    """
    rejected = (
        param_model['t0'] < 0
        or param_model['rho'] < 0
        or param_model['rho'] > 1.0
        or param_model['tE'] < 1e-10
        or param_model['q'] < 1e-9
        or param_model['s'] < 1e-10
        or param_model['s'] > 10
    )
    return 1e12 if rejected else 0
# ----------------------------------------------------------------------
def lnprob(theta, time_serie, model_params, fitted_param, nuisance, models_names,
        interpol_method, tb, cfgsetup):
    """Log-posterior of the microlensing model, as sampled by emcee.

    theta           -- current sampled values, consumed in canonical order
    time_serie      -- dict of per-datapoint arrays (dates, flux, err_flux,
                       obs, model, interpol, amp, fs, fb, ...); updated in place
    model_params    -- unused here
    fitted_param    -- dict of fitted parameters (its keys select which
                       entries of theta map to which parameter)
    nuisance        -- dict of fixed parameter values; NOTE: mutated in place,
                       the fitted entries are overwritten below
    models_names    -- magnification model names to import from muLAn.models
    interpol_method -- dict of precomputed date grids for interpolated models
    tb              -- reference time passed to the trajectory models
    cfgsetup        -- ConfigParser holding per-model options

    Returns -chi2/2 - prior, or -1e12 for rejected/invalid models.
    """
    # Import the requested magnification models dynamically from muLAn.
    models = dict()
    for i in range(len(models_names)):
        text = 'muLAn.models.{:s}'.format(models_names[i])
        importlib.import_module(text)
        models.update({models_names[i]: getattr(mulanmodels, models_names[i])})
    # gamma (limb darkening) is fixed per-observatory unless it is fitted.
    flag_fix_gamma = 1
    # NOTE(review): dict.iteritems() is Python-2 only.
    key_list = np.array([])
    for key, value in fitted_param.iteritems():
        key_list = np.append(key_list, key)
    param_model = nuisance
    # Map theta onto the parameter dict, consuming entries in a fixed
    # canonical order (t0, u0, tE, rho, gamma, piEE, piEN, s, q, alpha,
    # dalpha, ds).  `id` shadows the builtin but is kept for compatibility.
    id=0
    cond = (key_list=='t0')
    if cond.sum()==1:
        param_model.update({'t0' : theta[id]})
        id=id+1
    cond = (key_list=='u0')
    if cond.sum()==1:
        param_model.update({'u0' : theta[id]})
        id=id+1
    cond = (key_list=='tE')
    if cond.sum()==1:
        param_model.update({'tE' : theta[id]})
        id=id+1
    cond = (key_list=='rho')
    if cond.sum()==1:
        param_model.update({'rho' : theta[id]})
        id=id+1
    cond = (key_list=='gamma')
    if cond.sum()==1:
        param_model.update({'gamma' : theta[id]})
        flag_fix_gamma = 0
        id=id+1
    cond = (key_list=='piEE')
    if cond.sum()==1:
        param_model.update({'piEE' : theta[id]})
        id=id+1
    cond = (key_list=='piEN')
    if cond.sum()==1:
        param_model.update({'piEN' : theta[id]})
        id=id+1
    cond = (key_list=='s')
    if cond.sum()==1:
        param_model.update({'s' : theta[id]})
        id=id+1
    cond = (key_list=='q')
    if cond.sum()==1:
        # Enforce q <= 1 by swapping to the inverse mass ratio when needed.
        if theta[id] < 1.0:
            param_model.update({'q' : theta[id]})
        else:
            try:
                param_model.update({'q' : 1.0 / theta[id]})
            except:
                param_model.update({'q' : theta[id]})
        id=id+1
    cond = (key_list=='alpha')
    if cond.sum()==1:
        param_model.update({'alpha' : theta[id]})
        id=id+1
    cond = (key_list == 'dalpha')
    if cond.sum()==1:
        param_model.update({'dalpha' : theta[id]})
        id=id+1
    cond = (key_list == 'ds')
    if cond.sum()==1:
        param_model.update({'ds' : theta[id]})
        # id=id+1
    # Evaluate priors
    chi2 = 0
    lnprior_curr = lnprior(param_model)
    if lnprior_curr < 1e11:
        # Calculation of the amplification, per observatory and per model.
        observatories = np.unique(time_serie['obs'])
        models_lib = np.unique(time_serie['model'])
        for j in range(len(observatories)):
            cond2 = (time_serie['obs']==observatories[j])
            if flag_fix_gamma:
                # gamma is not fitted: take the per-observatory value.
                param_model.update({'gamma': time_serie['gamma'][cond2][0]})
            for i in range(models_lib.shape[0]):
                # Only the points computed directly (not interpolated).
                cond = (time_serie['model'] == models_lib[i]) & (time_serie['obs']==observatories[j])\
                        & (time_serie['interpol'] == '0')
                if cond.sum() > 0:
                    time_serie_export = time_serie['dates'][cond]
                    DsN_export = time_serie['DsN'][cond]
                    DsE_export = time_serie['DsE'][cond]
                    Ds_export = dict({'N':DsN_export, 'E':DsE_export})
                    # Per-model options are optional in the configuration.
                    try:
                        kwargs_method = dict(cfgsetup.items(models_lib[i]))
                    except:
                        kwargs_method = dict()
                    amp = models[models_lib[i]].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=tb, **kwargs_method)
                    time_serie['amp'][cond] = amp
                    del amp
        # Interpolation method: compute the magnification on a coarse date
        # grid once, then interpolate it onto the flagged data points.
        # -------------------------------------------------------------------------
        key_list = [key for key in interpol_method]
        if len(key_list) > 0:
            for i in range(len(key_list)):
                time_serie_export = interpol_method[key_list[i]][0]
                DsN_export = interpol_method[key_list[i]][1]
                DsE_export = interpol_method[key_list[i]][2]
                Ds_export = dict({'N':DsN_export, 'E':DsE_export})
                # Key format is "<tag>#<model name>".
                name = key_list[i].split('#')[1]
                try:
                    kwargs_method = dict(cfgsetup.items(name))
                except:
                    kwargs_method = dict()
                amp = models[name].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=tb, **kwargs_method)
                interpol_method[key_list[i]][3] = amp
                interpol_func = interpolate.interp1d(time_serie_export, amp, kind='linear')
                cond = (time_serie['interpol'] == key_list[i])
                if cond.sum() > 0:
                    amp = interpol_func(time_serie['dates'][cond])
                    time_serie['amp'][cond] = amp
        # Source and blending fluxes.
        # -------------------------------------------------------------------------
        for j in range(len(observatories)):
            cond2 = (time_serie['obs']==observatories[j])
            # Linear least-squares fit of fs and fb given the amplification.
            fs, fb = algebra.fsfbwsig(time_serie, cond2, blending=True)
            time_serie['fs'][cond2] = fs
            time_serie['fb'][cond2] = fb
            # Degenerate flux solution: reject the model.
            if (np.abs(fs) == np.inf) | (np.abs(fb) == np.inf):
                lnprior_curr = - np.inf
        # Calculation of chi2 (unless the flux fit invalidated the model).
        if lnprior_curr < 1e11:
            time_serie['flux_model'] = time_serie['amp']*time_serie['fs'] + time_serie['fb']
            time_serie['chi2pp'] = np.power((time_serie['flux']-time_serie['flux_model'])/time_serie['err_flux'], 2)
            chi2 = np.sum(time_serie['chi2pp'])
            result = - chi2/2.0 - lnprior_curr
        else:
            time_serie['flux_model'] = np.ones(len(time_serie['amp']))
            time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
            result = -1e12
    else:
        time_serie['flux_model'] = np.ones(len(time_serie['amp']))
        time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
        result = -1e12
    # A vanishing or infinite chi2 signals a numerically broken model.
    if (chi2 < 1e-3) | (chi2 == np.inf):
        time_serie['flux_model'] = np.ones(len(time_serie['amp']))
        time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
        result = -1e12
    return result
# ----------------------------------------------------------------------
def ini_chains_gene(fitted_param, nwalkers, params):
    """Draw initial MCMC walker positions.

    Walker 0 starts exactly at the best-fit values in *fitted_param*; every
    other walker is drawn uniformly inside [p - |lo|, p + |hi|], where lo/hi
    come from params[key][1] and params[key][2].  Parameters are ordered
    canonically (t0, u0, tE, ...).

    Returns a list of nwalkers 1-D float arrays.
    """
    canonical = ['t0', 'u0', 'tE', 'rho', 'gamma', 'piEE', 'piEN', 's', 'q',
                 'alpha', 'dalpha', 'ds']
    ordered = [k for k in canonical if k in fitted_param]
    chains = []
    for walker in range(nwalkers):
        if walker == 0:
            # First walker: the current best-fit point itself.
            position = [fitted_param[k] for k in ordered]
        else:
            position = []
            for k in ordered:
                lo = fitted_param[k] - abs(float(params[k][1]))
                hi = fitted_param[k] + abs(float(params[k][2]))
                low, high = min(lo, hi), max(lo, hi)
                position.append((high - low) * np.random.random_sample() + low)
        chains.append(np.array(position, dtype='float64'))
    return chains
# ----------------------------------------------------------------------
# Differential MCMC
# ----------------------------------------------------------------------
def search(cfgsetup=False, models=False, model_param=False, time_serie=False,\
        model2load=False, interpol_method=False):
    """Run the differential MCMC exploration (emcee) over the event models.

    Parameters
    ----------
    cfgsetup :
        ConfigParser-like configuration object (paths, fitted/grid/fixed
        parameter setup, MCMC controls).  NOTE(review): the ``False``
        defaults are sentinels; the function assumes real objects are
        always passed.
    models : dict
        Mapping model-library name -> module exposing ``magnifcalc``.
    model_param :
        Extra model parameters, forwarded untouched to the log-probability.
    time_serie : dict of arrays
        Photometric data (dates, flux, err_flux, obs, model, amp, fs, fb,
        DsN, DsE, ...).  Mutated in place.
    model2load, interpol_method :
        Forwarded untouched to the log-probability function.

    Side effects: writes per-chain text files, pickles the sampler state,
    shells out to copy/zip files, and builds a run archive.  May call
    ``sys.exit`` on user stop or on an inconsistent 'Resume' setup.
    """
    # ==================================================================
    # Preparing MCMC
    # ==================================================================
    # Emergency Stop initialization: writing '1' into the '.emergencystop'
    # file (or deleting the '.lock' file) interrupts the sampling loop below.
    if os.path.exists(cfgsetup.get('FullPaths', 'Event') + '.emergencystop'):
        os.remove(cfgsetup.get('FullPaths', 'Event') + '.emergencystop')
    file = open(cfgsetup.get('FullPaths', 'Event') + '.emergencystop', 'w')
    file.write('0')
    file.close()
    fn_lock = cfgsetup.get('FullPaths', 'Event') + '.lock'
    if not os.path.exists(fn_lock): open(fn_lock, 'w').close()
    # Parameter to be fitted / Nuisance parameters.
    # Each entry is a string array [mode, a, b, c]: mode is "fit" (sampled),
    # "gri" (explored on a grid) or anything else for a fixed parameter.
    params = {
        't0' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            't0').split(',')]),\
        'u0' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'u0').split(',')]),\
        'tE' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'tE').split(',')]),\
        'rho' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'rho').split(',')]),\
        'gamma' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'gamma').split(',')]),\
        'piEE' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'piEE').split(',')]),\
        'piEN' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'piEN').split(',')]),\
        's' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            's').split(',')]),\
        'q' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'q').split(',')]),\
        'alpha' : np.array([a.strip() for a in cfgsetup.get('Modelling',
            'alpha').split(',')]),\
        'dalpha': np.array([a.strip() for a in cfgsetup.get('Modelling', 'dalpha').split(',')]),\
        'ds': np.array([a.strip() for a in cfgsetup.get('Modelling', 'ds').split(',')])\
        }
    # Files: remove any stale models summary from a previous run.
    path = cfgsetup.get('FullPaths', 'Event')\
           + cfgsetup.get('RelativePaths', 'ModelsHistory')\
           + cfgsetup.get('Controls', 'Archive')\
           + '-ModelsSummary.txt'
    if os.path.exists(path): os.remove(path)
    sys.path.insert(0, cfgsetup.get('FullPaths', 'Code') + 'packages/')
    if (cfgsetup.getboolean('FitSetupDMCMC', 'Resume')==False):
        # Fresh run: wipe the chains directory and write one header per chain.
        path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
        shutil.rmtree(path)
        if not os.path.exists(path): os.makedirs(path)
        for i in range(cfgsetup.getint('FitSetupDMCMC', 'Chains')):
            filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                + '-c{:04d}'.format(i) + '.txt'
            file_chains = open(filename4chains, 'a')
            format4chains = '# Exploration of chain n°{:d}.\n'.format(i)\
                + '#{:>9} '.format('ID')\
                + '{:>17} '.format('t0')\
                + '{:>17} '.format('u0')\
                + '{:>17} '.format('tE')\
                + '{:>17} '.format('rho')\
                + '{:>17} '.format('gamma')\
                + '{:>17} '.format('piEN')\
                + '{:>17} '.format('piEE')\
                + '{:>17} '.format('s')\
                + '{:>17} '.format('q')\
                + '{:>17} '.format('alpha')\
                + '{:>17} '.format('dalpha')\
                + '{:>17} '.format('ds')\
                + '{:>17} '.format('chi2')\
                + '{:>7} '.format('accrate')\
                + '{:>8} '.format('date')\
                + '{:>6} '.format('hour')\
                + '{:>17} '.format('chi2/dof')\
                + '\n'
            file_chains.write(format4chains)
            file_chains.close()
        accrate_loaded = np.array([])
        id_loaded = np.array([])
    else:
        # Resume mode: reload existing chains, the last walker positions and
        # the pseudo-random generator state, then continue sampling.
        path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
        if not os.path.exists(path):
            text = "\n\033[1m\033[91mDirectory with chains is missing in 'Resume' mode. muLAn killed.\033[0m"
            sys.exit(text)
        else:
            path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
            fnames_chains = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*-c*.txt")
            # Exclude per-grid-node chain files ("...-g<N>.txt") from the resume set.
            fnames_chains_exclude = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*g*.txt")
            temp = []
            for a in fnames_chains:
                if (a in fnames_chains_exclude) == False:
                    temp.append(a)
            fnames_chains = copy.deepcopy(temp)
            del temp, fnames_chains_exclude
            nb_chains = len(fnames_chains)
            if nb_chains != cfgsetup.getint("FitSetupDMCMC", "Chains"):
                text = "\n\033[1m\033[91mThe number of chains does not fit in 'Resume' mode. muLAn killed.\033[0m"
                sys.exit(text)
            samples_file = dict({'chi2': [], 't0': [], 'u0': [], 'tE': [], 'rho': [], 'gamma': [], 'piEE': [], 'piEN': [], 's': [], 'q': [], 'alpha': [], 'dalpha': [], 'ds': [], 'chain': [], 'fullid': [], 'date_save': [], 'time_save': [], 'id': [], 'accrate': [], 'chi2/dof': []})
            accrate_loaded = np.array([])
            id_loaded = np.array([])
            # Parse each chain file; columns are whitespace-separated in the
            # same order as the header written above.
            for i in range(nb_chains):
                file = open(fnames_chains[i], 'r')
                for line in file:
                    params_model = line
                    if params_model[0] == '#':
                        continue
                    samples_file['id'].append(int([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][0]))
                    samples_file['t0'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][1]))
                    samples_file['u0'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][2]))
                    samples_file['tE'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][3]))
                    samples_file['rho'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][4]))
                    samples_file['gamma'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][5]))
                    samples_file['piEN'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][6]))
                    samples_file['piEE'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][7]))
                    samples_file['s'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][8]))
                    samples_file['q'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][9]))
                    samples_file['alpha'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][10]))
                    samples_file['dalpha'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][11]))
                    samples_file['ds'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][12]))
                    samples_file['chi2'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][13]))
                    samples_file['accrate'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][14]))
                    samples_file['date_save'].append(int([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][15]))
                    samples_file['time_save'].append([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][16])
                    samples_file['chi2/dof'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][17]))
                    samples_file['chain'].append(int(fnames_chains[i][-8:-4]))
                    samples_file['fullid'].append(-1)
                file.close()
                # Keep only the last recorded acceptance rate / model id per
                # chain; used to continue the running averages below.
                accrate_loaded = np.append(accrate_loaded, samples_file['accrate'][-1])
                id_loaded = np.append(id_loaded, samples_file['id'][-1])
            filename = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')\
                    + cfgsetup.get('Controls', 'Archive') + "-lastposition.p"
            file = open(filename, "r")
            pos_pickle = pickle.load(file)
            file.close()
            filename = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')\
                    + cfgsetup.get('Controls', 'Archive') + "-rgenerator.p"
            file = open(filename, "r")
            rgenerator_piclke = pickle.load(file)
            file.close()
            del samples_file
    # Prepare the grids: build a small throwaway script that materializes the
    # grid axes (via np.linspace/np.logspace) and pickles them to 'tmp.p'.
    # NOTE(review): 'format' shadows the builtin; kept as-is for fidelity.
    grid_params = np.array([])
    format = 'import numpy as np\nimport pickle\ntab = np.array(['
    i = 0
    if params['t0'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['t0'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['t0'][1], params['t0'][2], params['t0'][3])
    if params['u0'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['u0'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['u0'][1], params['u0'][2], params['u0'][3])
    if params['tE'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['tE'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['tE'][1], params['tE'][2], params['tE'][3])
    if params['rho'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['rho'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['rho'][1], params['rho'][2], params['rho'][3])
    if params['gamma'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['gamma'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['gamma'][1], params['gamma'][2], params['gamma'][3])
    if params['piEE'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['piEE'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['piEE'][1], params['piEE'][2], params['piEE'][3])
    if params['piEN'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['piEN'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['piEN'][1], params['piEN'][2], params['piEN'][3])
    if params['s'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['s'])
        # s is gridded in log space, hence it must be strictly positive.
        a = float(params['s'][1])
        b = float(params['s'][2])
        if (a > 0) & (b > 0):
            format = format + 'np.logspace({0:.10e}, {1:.10e}, {2:s}),'.format(np.log10(a), np.log10(b), params['s'][3])
        else:
            sys.exit('Please enter a positive value for s.')
    if params['q'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['q'])
        # q is gridded in log space, hence it must be strictly positive.
        a = float(params['q'][1])
        b = float(params['q'][2])
        if (a > 0) & (b > 0):
            format = format + 'np.logspace({0:.10e}, {1:.10e}, {2:s}),'.format(np.log10(a), np.log10(b), params['q'][3])
        else:
            sys.exit('Please enter a positive value for q.')
    if params['alpha'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['alpha'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['alpha'][1], params['alpha'][2], params['alpha'][3])
    if params['dalpha'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['dalpha'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['dalpha'][1], params['dalpha'][2], params['dalpha'][3])
    if params['ds'][0]=="gri":
        i = i + 1
        grid_params = np.append(grid_params, ['ds'])
        format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['ds'][1], params['ds'][2], params['ds'][3])
    format = format[:-1] + '])\n'
    format = format + 'file_save = open("' + cfgsetup.get('FullPaths', 'Code')\
            + 'tmp.p", "w")\npickle.dump(tab, file_save)\nfile_save.close()\n'
    filename = cfgsetup.get('FullPaths', 'Code') + 'temp_grid.py'
    file_temp = open(filename, 'w')
    file_temp.write(format)
    file_temp.close()
    flag_grid_yes = 1
    if i>0:
        # At least one gridded parameter: execute the generated script and
        # load back the grid axes, then take their cartesian product.
        execfile(filename)
        filename = cfgsetup.get('FullPaths', 'Code') + 'tmp.p'
        file = open(filename, 'r')
        grid_values = pickle.load(file)
        file.close()
        os.remove(filename)
        grid_values_combined = combin(grid_values)
        nb_params_grid = len(grid_values_combined)
        lengh_grid = len(grid_values_combined.T)
    else:
        # No grid: a single "node" runs one MCMC over the fitted parameters.
        nb_params_grid = 1
        lengh_grid = 1
        flag_grid_yes = 0
    filename = cfgsetup.get('FullPaths', 'Code') + 'temp_grid.py'
    os.remove(filename)
    # Prepare the DMCMC: per-chain "best model so far" buffers (used only in
    # grid mode to record one summary line per grid node).
    # print(lengh_grid)
    t0_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    u0_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    tE_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    rho_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    gamma_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    piEN_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    piEE_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    s_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    q_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    alpha_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    dalpha_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    ds_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    lnprob_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    accrate_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
    date_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'S8')
    hour_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'S6')
    # One full MCMC per grid node (a single pass when there is no grid).
    for id_grid in range(lengh_grid):
        # if flag_grid_yes:
        #     text = '\nGrid: {:d} / {:d}'.format(id_grid+1, lengh_grid)
        #     communicate(cfgsetup, 1, text)
        # update_progress_grid("Grid progression", id_grid+1, lengh_grid)
        if flag_grid_yes:
            # Current grid node: fixed values for the gridded parameters,
            # plus fresh per-node chain files ("-g<id_grid>").
            node = dict()
            for id2_grid in range(nb_params_grid):
                node.update({grid_params[id2_grid] : grid_values_combined.T[id_grid][id2_grid]})
            path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
            for i in range(cfgsetup.getint('FitSetupDMCMC', 'Chains')):
                filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                    + '-c{:04d}'.format(i) + '-g{:d}'.format(id_grid) + '.txt'
                file_chains = open(filename4chains, 'a')
                format4chains = '# Exploration of chain n°{:d}.\n'.format(i)\
                    + '#{:>9} '.format('ID')\
                    + '{:>17} '.format('t0')\
                    + '{:>17} '.format('u0')\
                    + '{:>17} '.format('tE')\
                    + '{:>17} '.format('rho')\
                    + '{:>17} '.format('gamma')\
                    + '{:>17} '.format('piEN')\
                    + '{:>17} '.format('piEE')\
                    + '{:>17} '.format('s')\
                    + '{:>17} '.format('q')\
                    + '{:>17} '.format('alpha')\
                    + '{:>17} '.format('dalpha')\
                    + '{:>17} '.format('ds')\
                    + '{:>17} '.format('chi2')\
                    + '{:>7} '.format('accrate')\
                    + '{:>8} '.format('date')\
                    + '{:>6} '.format('hour')\
                    + '{:>17} '.format('chi2/dof')\
                    + '\n'
                file_chains.write(format4chains)
                file_chains.close()
        # Prepare the DMCMC: split parameters into sampled ('fit') vs fixed
        # (nuisance, including the current grid-node values).
        nuisance = dict()
        fitted_param = dict()
        result = np.array([])
        if params['t0'][0]!="fit":
            if params['t0'][0]=="gri":
                nuisance.update({'t0': node['t0']})
            else:
                nuisance.update({'t0': params['t0'][3].astype(np.float64)})
        else:
            fitted_param.update({'t0': params['t0'][3].astype(np.float64)})
            result = np.append(result, fitted_param['t0'])
        if params['u0'][0]!="fit":
            if params['u0'][0]=="gri":
                nuisance.update({'u0': node['u0']})
            else:
                nuisance.update({'u0': params['u0'][3].astype(np.float64)})
        else:
            fitted_param.update({'u0': params['u0'][3].astype(np.float64)})
            result = np.append(result, fitted_param['u0'])
        if params['tE'][0]!="fit":
            if params['tE'][0]=="gri":
                nuisance.update({'tE': node['tE']})
            else:
                nuisance.update({'tE': params['tE'][3].astype(np.float64)})
        else:
            fitted_param.update({'tE': params['tE'][3].astype(np.float64)})
            result = np.append(result, fitted_param['tE'])
        if params['rho'][0]!="fit":
            if params['rho'][0]=="gri":
                nuisance.update({'rho': node['rho']})
            else:
                nuisance.update({'rho': params['rho'][3].astype(np.float64)})
        else:
            fitted_param.update({'rho': params['rho'][3].astype(np.float64)})
            result = np.append(result, fitted_param['rho'])
        if params['gamma'][0]!="fit":
            if params['gamma'][0]=="gri":
                nuisance.update({'gamma': node['gamma']})
            else:
                nuisance.update({'gamma': params['gamma'][3].astype(np.float64)})
        else:
            fitted_param.update({'gamma': params['gamma'][3].astype(np.float64)})
            result = np.append(result, fitted_param['gamma'])
        if params['piEE'][0]!="fit":
            if params['piEE'][0]=="gri":
                nuisance.update({'piEE': node['piEE']})
            else:
                nuisance.update({'piEE': params['piEE'][3].astype(np.float64)})
        else:
            fitted_param.update({'piEE': params['piEE'][3].astype(np.float64)})
            result = np.append(result, fitted_param['piEE'])
        if params['piEN'][0]!="fit":
            if params['piEN'][0]=="gri":
                nuisance.update({'piEN': node['piEN']})
            else:
                nuisance.update({'piEN': params['piEN'][3].astype(np.float64)})
        else:
            fitted_param.update({'piEN': params['piEN'][3].astype(np.float64)})
            result = np.append(result, fitted_param['piEN'])
        if params['s'][0]!="fit":
            if params['s'][0]=="gri":
                nuisance.update({'s': node['s']})
            else:
                nuisance.update({'s': params['s'][3].astype(np.float64)})
        else:
            fitted_param.update({'s': params['s'][3].astype(np.float64)})
            result = np.append(result, fitted_param['s'])
        if params['q'][0]!="fit":
            if params['q'][0]=="gri":
                nuisance.update({'q': node['q']})
            else:
                nuisance.update({'q': params['q'][3].astype(np.float64)})
        else:
            fitted_param.update({'q': params['q'][3].astype(np.float64)})
            result = np.append(result, fitted_param['q'])
        if params['alpha'][0]!="fit":
            if params['alpha'][0]=="gri":
                nuisance.update({'alpha': node['alpha']})
            else:
                nuisance.update({'alpha': params['alpha'][3].astype(np.float64)})
        else:
            fitted_param.update({'alpha': params['alpha'][3].astype(np.float64)})
            result = np.append(result, fitted_param['alpha'])
        if params['dalpha'][0]!="fit":
            if params['dalpha'][0]=="gri":
                nuisance.update({'dalpha': node['dalpha']})
            else:
                nuisance.update({'dalpha': params['dalpha'][3].astype(np.float64)})
        else:
            fitted_param.update({'dalpha': params['dalpha'][3].astype(np.float64)})
            result = np.append(result, fitted_param['dalpha'])
        if params['ds'][0]!="fit":
            if params['ds'][0]=="gri":
                nuisance.update({'ds': node['ds']})
            else:
                nuisance.update({'ds': params['ds'][3].astype(np.float64)})
        else:
            fitted_param.update({'ds': params['ds'][3].astype(np.float64)})
            result = np.append(result, fitted_param['ds'])
        # Parameters of MCMC.
        # NOTE: nwalkers must be an even number (emcee requirement).
        ndim, nwalkers = len(fitted_param), cfgsetup.getint('FitSetupDMCMC', 'Chains')
        # pos = [result + 0.1*np.random.randn(ndim) for i in range(
        #     nwalkers)]
        if fitted_param!={}:
            # Use delta random in a specified interval
            if cfgsetup.getboolean("FitSetupDMCMC", "Resume"):
                if pos_pickle.shape[1] != len(fitted_param):
                    text = "\n\033[1m\033[91mThe number of fitted parameters does not fit in 'Resume' mode. muLAn killed.\033[0m"
                    sys.exit(text)
                if flag_grid_yes==1:
                    text = "\n\033[1m\033[91m'Resume' mode not compatible with a grid. muLAn killed.\033[0m"
                    sys.exit(text)
                pos = pos_pickle
                rstate = rgenerator_piclke
            else:
                pos = ini_chains_gene(fitted_param, nwalkers, params)
                rstate = None
            # Sampler: rebuild for every grid node (drop the previous one).
            if id_grid > 0:
                del sampler
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                    args=(time_serie, model_param, fitted_param, nuisance, model2load, interpol_method, cfgsetup.getfloat('Modelling', 'tb'), cfgsetup),
                    threads=cfgsetup.getint('FitSetupDMCMC', 'Threads'))
            # ==============================================================
            # RUN MCMC
            # ==============================================================
            chain_lengh = cfgsetup.getint('FitSetupDMCMC', 'ChainLength')
            id_model = np.ones(nwalkers, dtype=np.int64)
            for pos, lnprobc, rstate in sampler.sample(pos, rstate0=rstate, iterations=chain_lengh, storechain=False):
                # text = 'MCMC: {:d} / {:d}'.format(id_model[0], chain_lengh)
                # communicate(cfgsetup, 1, text)
                if cfgsetup.getint("Modelling", "Verbose") >=3:
                    if flag_grid_yes:
                        update_progress_grid(id_model[0], chain_lengh, id_grid+1, lengh_grid)
                    else:
                        update_progress("Progression", id_model[0], chain_lengh)
                accrate = sampler.acceptance_fraction
                key_list = np.array([])
                for key, value in fitted_param.iteritems():
                    key_list = np.append(key_list, key)
                # Record the current position of every walker.
                for i in range(nwalkers):
                    # NOTE(review): this binds param_model to the SAME dict as
                    # nuisance (no copy); fitted entries are overwritten below.
                    param_model = nuisance
                    # 'id' walks through pos[i] in the same order the fitted
                    # parameters were appended (t0, u0, ..., ds).
                    id = 0
                    cond = (key_list=='t0')
                    if cond.sum()==1:
                        param_model.update({'t0' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='u0')
                    if cond.sum()==1:
                        param_model.update({'u0' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='tE')
                    if cond.sum()==1:
                        param_model.update({'tE' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='rho')
                    if cond.sum()==1:
                        param_model.update({'rho' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='gamma')
                    if cond.sum()==1:
                        param_model.update({'gamma' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='piEE')
                    if cond.sum()==1:
                        param_model.update({'piEE' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='piEN')
                    if cond.sum()==1:
                        param_model.update({'piEN' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='s')
                    if cond.sum()==1:
                        param_model.update({'s' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='q')
                    if cond.sum()==1:
                        param_model.update({'q' : pos[i][id]})
                        id=id+1
                    cond = (key_list=='alpha')
                    if cond.sum()==1:
                        param_model.update({'alpha' : pos[i][id]})
                        id=id+1
                    cond = (key_list == 'dalpha')
                    if cond.sum() == 1:
                        param_model.update({'dalpha': pos[i][id]})
                        id=id+1
                    cond = (key_list == 'ds')
                    if cond.sum() == 1:
                        param_model.update({'ds': pos[i][id]})
                        id=id+1
                    # id=id+1
                    if flag_grid_yes:
                        # Best model: keep the highest-lnprob sample per chain.
                        if id_model[i]==1:
                            t0_best[i] = param_model['t0']
                            u0_best[i] = param_model['u0']
                            tE_best[i] = param_model['tE']
                            rho_best[i] = param_model['rho']
                            gamma_best[i] = param_model['gamma']
                            piEN_best[i] = param_model['piEN']
                            piEE_best[i] = param_model['piEE']
                            s_best[i] = param_model['s']
                            q_best[i] = param_model['q']
                            alpha_best[i] = param_model['alpha']
                            dalpha_best[i] = param_model['dalpha']
                            ds_best[i] = param_model['ds']
                            lnprob_best[i] = lnprobc[i]
                            accrate_best[i] = accrate[i]
                            date_best[i] = datetime.date.today().strftime("%Y%m%d")
                            hour_best[i] = datetime.datetime.utcnow().strftime("%H%M%S")
                        elif lnprobc[i]>lnprob_best[i]:
                            t0_best[i] = param_model['t0']
                            u0_best[i] = param_model['u0']
                            tE_best[i] = param_model['tE']
                            rho_best[i] = param_model['rho']
                            gamma_best[i] = param_model['gamma']
                            piEN_best[i] = param_model['piEN']
                            piEE_best[i] = param_model['piEE']
                            s_best[i] = param_model['s']
                            q_best[i] = param_model['q']
                            alpha_best[i] = param_model['alpha']
                            dalpha_best[i] = param_model['dalpha']
                            ds_best[i] = param_model['ds']
                            lnprob_best[i] = lnprobc[i]
                            accrate_best[i] = accrate[i]
                            date_best[i] = datetime.date.today().strftime("%Y%m%d")
                            hour_best[i] = datetime.datetime.utcnow().strftime("%H%M%S")
                        # Save Chains (per-grid-node file); chi2 = -2*lnprob.
                        filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                            + '-c{:04d}'.format(i) + '-g{:d}'.format(id_grid) + '.txt'
                        file_chains = open(filename4chains, 'a')
                        format4chains = '{:>10d} '.format(id_model[i])\
                            + '{:+.10e} '.format(param_model['t0'])\
                            + '{:+.10e} '.format(param_model['u0'])\
                            + '{:+.10e} '.format(param_model['tE'])\
                            + '{:+.10e} '.format(param_model['rho'])\
                            + '{:+.10e} '.format(param_model['gamma'])\
                            + '{:+.10e} '.format(param_model['piEN'])\
                            + '{:+.10e} '.format(param_model['piEE'])\
                            + '{:+.10e} '.format(param_model['s'])\
                            + '{:+.10e} '.format(param_model['q'])\
                            + '{:+.10e} '.format(param_model['alpha'])\
                            + '{:+.10e} '.format(param_model['dalpha'])\
                            + '{:+.10e} '.format(param_model['ds'])\
                            + '{:+.10e} '.format(-2.0*lnprobc[i])\
                            + '{:>7.3f} '.format(accrate[i])\
                            + '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
                            + '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
                            + '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
                            + '\n'
                        file_chains.write(format4chains)
                        file_chains.close()
                    else:
                        # Non-grid run: continue cumulative id / acceptance-rate
                        # averages when resuming from a previous run.
                        if (len(accrate_loaded)>0) & (len(id_loaded)>0):
                            id_model_curr = int(id_model[i] + id_loaded[i])
                            accrate_curr = (1.0 * accrate[i] * id_model[i] + 1.0 * accrate_loaded[i] * id_loaded[i]) / id_model_curr
                        else:
                            id_model_curr = int(id_model[i])
                            accrate_curr = accrate[i]
                        path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
                        filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                            + '-c{:04d}'.format(i) + '.txt'
                        file_chains = open(filename4chains, 'a')
                        format4chains = '{:>10d} '.format(id_model_curr)\
                            + '{:+.10e} '.format(param_model['t0'])\
                            + '{:+.10e} '.format(param_model['u0'])\
                            + '{:+.10e} '.format(param_model['tE'])\
                            + '{:+.10e} '.format(param_model['rho'])\
                            + '{:+.10e} '.format(param_model['gamma'])\
                            + '{:+.10e} '.format(param_model['piEN'])\
                            + '{:+.10e} '.format(param_model['piEE'])\
                            + '{:+.10e} '.format(param_model['s'])\
                            + '{:+.10e} '.format(param_model['q'])\
                            + '{:+.10e} '.format(param_model['alpha'])\
                            + '{:+.10e} '.format(param_model['dalpha'])\
                            + '{:+.10e} '.format(param_model['ds'])\
                            + '{:+.10e} '.format(-2.0*lnprobc[i])\
                            + '{:>7.3f} '.format(accrate_curr)\
                            + '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
                            + '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
                            + '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
                            + '\n'
                        file_chains.write(format4chains)
                        file_chains.close()
                    id_model[i] = id_model[i] + 1
                # Emergency Stop: '1' in the stop file or a missing lock file
                # aborts the sampling loop after this iteration.
                file = open(cfgsetup.get('FullPaths', 'Event') + '.emergencystop', 'r')
                stop = 0
                for line in file:
                    if line.strip() == '1':
                        stop=1
                file.close()
                fn_lock = cfgsetup.get('FullPaths', 'Event') + '.lock'
                if not os.path.exists(fn_lock): stop=1
                # Record the last position (enables 'Resume' later).
                filename4pos = path + cfgsetup.get('Controls', 'Archive') + '-lastposition.p'
                file_save = open(filename4pos, "w")
                pickle.dump(pos, file_save)
                file_save.close()
                del pos
                # Record the state of the pseudo-random generator.
                filename = path + cfgsetup.get('Controls', 'Archive') + '-rgenerator.p'
                file_save = open(filename, "w")
                pickle.dump(rstate, file_save)
                file_save.close()
                if stop==1:
                    break
                # print("On continue")
            # Save best model if grid: one summary line per grid node in the
            # main (non "-g") chain files.
            if flag_grid_yes:
                for i in range(nwalkers):
                    path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
                    filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                        + '-c{:04d}'.format(i) + '.txt'
                    file_chains = open(filename4chains, 'a')
                    format4chains = '{:>10d} '.format(id_grid+1)\
                        + '{:+.10e} '.format(t0_best[i])\
                        + '{:+.10e} '.format(u0_best[i])\
                        + '{:+.10e} '.format(tE_best[i])\
                        + '{:+.10e} '.format(rho_best[i])\
                        + '{:+.10e} '.format(gamma_best[i])\
                        + '{:+.10e} '.format(piEN_best[i])\
                        + '{:+.10e} '.format(piEE_best[i])\
                        + '{:+.10e} '.format(s_best[i])\
                        + '{:+.10e} '.format(q_best[i])\
                        + '{:+.10e} '.format(alpha_best[i])\
                        + '{:+.10e} '.format(dalpha_best[i])\
                        + '{:+.10e} '.format(ds_best[i]) \
                        + '{:+.10e} '.format(-2.0*lnprob_best[i])\
                        + '{:>7.3f} '.format(accrate[i])\
                        + '{:8} '.format(date_best[i])\
                        + '{:6} '.format(hour_best[i])\
                        + '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
                        + '\n'
                    file_chains.write(format4chains)
                    file_chains.close()
            if stop==1:
                break
            # Create an archive for each MCMC on the grid, then delete the
            # per-node chain files.
            if flag_grid_yes:
                path_event = cfgsetup.get('FullPaths', 'Event')
                path = path_event + cfgsetup.get('RelativePaths', 'Chains')\
                    + '{:d}'.format(id_grid) + '/'
                os.makedirs(path)
                text = 'cp ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
                    + '*g' + '{:d}'.format(id_grid) + '* ' + path
                bash_command(text)
                text = 'cp ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
                    + '*.p ' + path
                bash_command(text)
                shutil.make_archive(path, 'zip', path)
                shutil.rmtree(path)
                text = 'rm ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
                    + '*g' + '{:d}'.format(id_grid) + '* '
                bash_command(text)
        else:
            # No fitted parameter: evaluate the (fixed) model once per chain
            # and record it with ID 0.
            stop = 0
            for i in range(nwalkers):
                param_model = nuisance
                # Calculation of the amplification per observatory / model.
                observatories = np.unique(time_serie['obs'])
                models_lib = np.unique(time_serie['model'])
                for jjj in range(len(observatories)):
                    cond2 = (time_serie['obs']==observatories[jjj])
                    for iii in range(models_lib.shape[0]):
                        cond = (time_serie['model'] == models_lib[iii]) & (time_serie['obs']==observatories[jjj])
                        if cond.sum() > 0:
                            time_serie_export = time_serie['dates'][cond]
                            DsN_export = time_serie['DsN'][cond]
                            DsE_export = time_serie['DsE'][cond]
                            Ds_export = dict({'N':DsN_export, 'E':DsE_export})
                            try:
                                kwargs_method = dict(cfgsetup.items(models_lib[iii]))
                            except:
                                kwargs_method = dict()
                            amp = models[models_lib[iii]].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=cfgsetup.getfloat('Modelling', 'tb'), **kwargs_method)
                            time_serie['amp'][cond] = amp
                            del amp
                    # Calculation of fs and fb (source / blend fluxes).
                    # fs, fb = algebra.fsfb(time_serie, cond2, blending=True)
                    fs, fb = algebra.fsfbwsig(time_serie, cond2, blending=True)
                    time_serie['fs'][cond2] = fs
                    time_serie['fb'][cond2] = fb
                # Calculation of chi2
                time_serie['flux_model'] = time_serie['amp']*time_serie['fs'] + time_serie['fb']
                time_serie['chi2pp'] = np.power((time_serie['flux']-time_serie['flux_model'])/time_serie['err_flux'], 2)
                chi2_ini = np.sum(time_serie['chi2pp'])
                path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
                filename4chains = path + cfgsetup.get('Controls', 'Archive')\
                    + '-c{:04d}'.format(i) + '.txt'
                file_chains = open(filename4chains, 'a')
                format4chains = '{:>10d} '.format(0)\
                    + '{:+.10e} '.format(param_model['t0'])\
                    + '{:+.10e} '.format(param_model['u0'])\
                    + '{:+.10e} '.format(param_model['tE'])\
                    + '{:+.10e} '.format(param_model['rho'])\
                    + '{:+.10e} '.format(param_model['gamma'])\
                    + '{:+.10e} '.format(param_model['piEN'])\
                    + '{:+.10e} '.format(param_model['piEE'])\
                    + '{:+.10e} '.format(param_model['s'])\
                    + '{:+.10e} '.format(param_model['q'])\
                    + '{:+.10e} '.format(param_model['alpha'])\
                    + '{:+.10e} '.format(param_model['dalpha'])\
                    + '{:+.10e} '.format(param_model['ds'])\
                    + '{:+.10e} '.format(chi2_ini)\
                    + '{:>7.3f} '.format(0.0)\
                    + '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
                    + '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
                    + '{:+.10e}'.format(chi2_ini/len(time_serie['dates']))\
                    + '\n'
                file_chains.write(format4chains)
                file_chains.close()
    # These names only exist when the sampler actually ran.
    try:
        del lnprobc
        del rstate
    except:
        pass
    # Create an archive of the full run (chains, data, configuration files).
    path_code = cfgsetup.get('FullPaths', 'Code')
    path_event = cfgsetup.get('FullPaths', 'Event')
    path_arch = path_event + cfgsetup.get('RelativePaths', 'Archives')
    if os.path.exists(path_arch + cfgsetup.get('Controls', 'Archive')):
        shutil.rmtree(path_arch + cfgsetup.get('Controls', 'Archive'))
    dir = path_event + cfgsetup.get('RelativePaths', 'Chains')
    shutil.copytree(dir, path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Chains'))
    dir = path_event + cfgsetup.get('RelativePaths', 'Data')
    shutil.copytree(dir, path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Data'))
    file = path_event + "mulan.py"
    shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/mulan.py")
    file = path_event + "observatories.ini"
    shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/observatories.ini")
    file = path_event + "setup.ini"
    shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/setup.ini")
    file = path_event + "advancedsetup.ini"
    shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/advancedsetup.ini")
    dir = path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Plots')
    os.makedirs(dir)
    try:
        shutil.rmtree(path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('Controls', 'Archive') + ".zip")
    except:
        UserWarning("A ZIP file already exits. Archive not created.")
    filename = path_arch + cfgsetup.get('Controls', 'Archive')
    shutil.make_archive(filename, 'zip', filename)
    shutil.rmtree(filename)
    text = "Create archive {0:}".format(cfgsetup.get('Controls', 'Archive'))
    communicate(cfgsetup, 3, text, opts=False, prefix=False, newline=False, tab=True)
    # if os.path.exists(path_arch + cfgsetup.get('Controls', 'Archive') + '.zip'):
    #     os.remove(path_arch + cfgsetup.get('Controls', 'Archive') + '.zip')
    # shutil.move(filename + '.zip', path_arch)
    # Free memory
    try:
        del id_model, param_model, cond, id, accrate, key_list
        del value, chain_lengh, sampler, ndim, nwalkers
        del fitted_param, nuisance, result, params, grid_params
        del nb_params_grid
    except:
        pass
    if stop==1:
        sys.exit("\nProcess stopped by the user.\n")
#!/usr/bin/env python
import requests
import json
from json import encoder
import sys
import six
import ec2
def add_pretty_names(instances):
    """Attach a human-readable 'pretty_name' to every instance record.

    The name combines a family label (e.g. 'R4 Memory Optimized') with the
    capitalized size, expanding size multipliers such as '2x' into
    'Double Extra Large'.  Mutates the records in *instances* in place.
    """
    family_labels = {
        't2': 'T2 General Purpose',
        'r3': 'R3 Memory Optimized',
        'r4': 'R4 Memory Optimized',
        'c3': 'C3 High-CPU',
        'c4': 'C4 High-CPU',
        'm3': 'M3 General Purpose',
        'i3': 'I3 High I/O',
        'cg1': 'Cluster GPU',
        'cc2': 'Cluster Compute',
        'cr1': 'High Memory Cluster',
        'hs1': 'High Storage',
        'c1': 'C1 High-CPU',
        'hi1': 'HI1. High I/O',
        'm2': 'M2 High Memory',
        'm1': 'M1 General Purpose',
        'm4': 'M4 General Purpose',
    }
    for key in instances:
        info = instances[key]
        # Instance type looks like "db.r4.large"; the "db" prefix is dropped.
        parts = info['instance_type'].split('.')
        family, size = parts[1], parts[2]
        label = family_labels.get(family, family.upper())
        # Translate the size multiplier prefix; the bare 'x' case maps to an
        # empty word so "xlarge" renders as "Extra Large".
        multiplier = None
        if size.startswith('8x'):
            multiplier = 'Eight'
        elif size.startswith('4x'):
            multiplier = 'Quadruple'
        elif size.startswith('2x'):
            multiplier = 'Double'
        elif size.startswith('10x'):
            multiplier = 'Deca'
        elif size.startswith('x'):
            multiplier = ''
        words = [label]
        if multiplier is not None:
            words.append(multiplier)
            words.append('Extra')
            size = 'Large'
        words.append(size.capitalize())
        # Drop any empty words (the bare-'x' multiplier) before joining.
        info['pretty_name'] = ' '.join(w for w in words if w)
def scrape(output_file, input_file=None):
# if an argument is given, use that as the path for the json file
if input_file:
with open(input_file) as json_data:
data = json.load(json_data)
else:
price_index = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonRDS/current/index.json'
index = requests.get(price_index)
data = index.json()
rds_instances = {}
instances = {}
# region mapping, someone thought it was handy not to include the region id's :(
regions = ec2.get_region_descriptions()
# loop through products, and only fetch available instances for now
for sku, product in six.iteritems(data['products']):
if product.get('productFamily', None) == 'Database Instance':
attributes = product['attributes']
# skip multi-az
if attributes['deploymentOption'] != 'Single-AZ':
continue
# map the region
location = ec2.canonicalize_location(attributes['location'])
instance_type = attributes['instanceType']
try:
region = regions[location]
except KeyError as e:
if location == 'Any':
region = 'us-east-1'
else:
print(f"ERROR: No region data for location={location}. Ignoring instance with sku={sku}, type={instance_type}")
continue
# set the attributes in line with the ec2 index
attributes['region'] = region
attributes['memory'] = attributes['memory'].split(' ')[0]
attributes['network_performance'] = attributes['networkPerformance']
attributes['family'] = attributes['instanceFamily']
attributes['instance_type'] = instance_type
attributes['database_engine'] = attributes['databaseEngine']
attributes['arch'] = attributes['processorArchitecture']
attributes['pricing'] = {}
attributes['pricing'][region] = {}
if attributes['engineCode'] not in ['210', '220']:
rds_instances[sku] = attributes
if instance_type not in instances.keys():
# delete some attributes that are inconsistent among skus
new_attributes = attributes.copy() # make copy so we can keep these attributes with the sku
new_attributes.pop('databaseEdition', None)
new_attributes.pop('databaseEngine', None)
new_attributes.pop('database_engine', None)
new_attributes.pop('deploymentOption', None)
new_attributes.pop('engineCode', None)
new_attributes.pop('licenseModel', None)
new_attributes.pop('location', None)
new_attributes.pop('locationType', None)
new_attributes.pop('operation', None)
new_attributes.pop('region', None)
new_attributes.pop('usagetype', None)
new_attributes['pricing'] = attributes['pricing']
instances[instance_type] = new_attributes
# Parse ondemand pricing
for sku, offers in six.iteritems(data['terms']['OnDemand']):
for code, offer in six.iteritems(offers):
for key, dimension in six.iteritems(offer['priceDimensions']):
# skip these for now
if any(descr in dimension['description'].lower() for descr in ['transfer', 'global', 'storage', 'iops', 'requests', 'multi-az']):
continue
instance = rds_instances.get(sku)
if not instance:
# print(f"WARNING: Received on demand pricing info for unknown sku={sku}")
continue
if instance['region'] not in instances[instance['instance_type']]['pricing']:
instances[instance['instance_type']]['pricing'][instance['region']] = {}
instances[instance['instance_type']]['pricing'][instance['region']][instance['engineCode']] = {
'ondemand': float(dimension['pricePerUnit']['USD'])
}
# keep this for backwards compatibility, even though it's wrong
# (database_engine is not unique, so multiple offerings overlap)
instances[instance['instance_type']]['pricing'][instance['region']][instance['database_engine']] = {
'ondemand': float(dimension['pricePerUnit']['USD'])
}
reserved_mapping = {
'3yr Partial Upfront': 'yrTerm3.partialUpfront',
'1yr Partial Upfront': 'yrTerm1.partialUpfront',
'3yr All Upfront': 'yrTerm3.allUpfront',
'1yr All Upfront': 'yrTerm1.allUpfront',
'1yr No Upfront': 'yrTerm1.noUpfront',
'3yr No Upfront': 'yrTerm3.noUpfront',
}
# Parse reserved pricing
for sku, offers in six.iteritems(data['terms']['Reserved']):
for code, offer in six.iteritems(offers):
for key, dimension in six.iteritems(offer['priceDimensions']):
instance = rds_instances.get(sku)
if not instance:
# print(f"WARNING: Received reserved pricing info for unknown sku={sku}")
continue
# skip multi-az
if instance['deploymentOption'] != 'Single-AZ':
continue
region = instance['region']
# create a regional hash
if region not in instance['pricing']:
instance['pricing'][region] = {}
# create a database_engine hash
if instance['database_engine'] not in instance['pricing'][region]:
instance['pricing'][region][instance['database_engine']] = {}
if instance['engineCode'] not in instance['pricing'][region]:
instance['pricing'][region][instance['engineCode']] = {}
# create a reserved hash
if 'reserved' not in instance['pricing'][region][instance['database_engine']]:
instance['pricing'][region][instance['database_engine']]['reserved'] = {}
if 'reserved' not in instance['pricing'][region][instance['engineCode']]:
instance['pricing'][region][instance['engineCode']]['reserved'] = {}
# store the pricing in placeholder field
reserved_type = "%s %s" % (offer['termAttributes']['LeaseContractLength'], offer['termAttributes']['PurchaseOption'])
instance['pricing'][region][instance['database_engine']]['reserved']['%s-%s' % (reserved_mapping[reserved_type], dimension['unit'].lower())] = float(dimension['pricePerUnit']['USD'])
instance['pricing'][region][instance['engineCode']]['reserved']['%s-%s' % (reserved_mapping[reserved_type], dimension['unit'].lower())] = float(dimension['pricePerUnit']['USD'])
# Calculate all reserved effective pricings (upfront hourly + hourly price)
for instance_type, instance in six.iteritems(instances):
for region, pricing in six.iteritems(instance['pricing']):
for engine, prices in six.iteritems(pricing):
if 'reserved' not in prices:
continue
try:
# no multi-az here
reserved_prices = {
'yrTerm3.partialUpfront': (prices['reserved']['yrTerm3.partialUpfront-quantity'] / (365 * 3) / 24) + prices['reserved']['yrTerm3.partialUpfront-hrs'],
'yrTerm1.partialUpfront': (prices['reserved']['yrTerm1.partialUpfront-quantity'] / 365 / 24) + prices['reserved']['yrTerm1.partialUpfront-hrs'],
'yrTerm3.allUpfront': (prices['reserved']['yrTerm3.allUpfront-quantity'] / (365 * 3) / 24) + prices['reserved']['yrTerm3.allUpfront-hrs'],
'yrTerm1.allUpfront': (prices['reserved']['yrTerm1.allUpfront-quantity'] / 365 / 24) + prices['reserved']['yrTerm1.allUpfront-hrs'],
'yrTerm1.noUpfront': prices['reserved']['yrTerm1.noUpfront-hrs'],
}
if 'yrTerm3.noUpfront-hrs' in prices['reserved']:
reserved_prices['yrTerm3.noUpfront'] = prices['reserved']['yrTerm3.noUpfront-hrs']
instances[instance_type]['pricing'][region][engine]['reserved'] = reserved_prices
except Exception as e:
print("ERROR: Trouble generating RDS reserved price for {}: {!r}".format(instance_type, e))
# print json.dumps(instances['db.m3.medium']['pricing']['eu-west-1']['MySQL'], indent=4)
add_pretty_names(instances)
# write output to file
encoder.FLOAT_REPR = lambda o: format(o, '.5f')
with open(output_file, 'w') as outfile:
json.dump(list(instances.values()), outfile, indent=4)
if __name__ == '__main__':
input_file = None
if len(sys.argv) > 1:
input_file = sys.argv[1]
output_file = './www/rds/instances.json'
scrape(output_file, input_file) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Authentication is implemented using flask_login and different environments can
implement their own login mechanisms by providing an `airflow_login` module
in their PYTHONPATH. airflow_login should be based off the
`airflow.www.login`
"""
from builtins import object
from airflow import version
__version__ = version.version
import logging
import sys
from airflow import configuration as conf
from airflow import settings
from airflow.models import DAG
from flask_admin import BaseView
from importlib import import_module
from airflow.exceptions import AirflowException
if settings.DAGS_FOLDER not in sys.path:
sys.path.append(settings.DAGS_FOLDER)
login = None
def load_login():
auth_backend = 'airflow.default_login'
try:
if conf.getboolean('webserver', 'AUTHENTICATE'):
auth_backend = conf.get('webserver', 'auth_backend')
except conf.AirflowConfigException:
if conf.getboolean('webserver', 'AUTHENTICATE'):
logging.warning(
"auth_backend not found in webserver config reverting to "
"*deprecated* behavior of importing airflow_login")
auth_backend = "airflow_login"
try:
global login
login = import_module(auth_backend)
except ImportError as err:
logging.critical(
"Cannot import authentication module %s. "
"Please correct your authentication backend or disable authentication: %s",
auth_backend, err
)
if conf.getboolean('webserver', 'AUTHENTICATE'):
raise AirflowException("Failed to import authentication backend")
class AirflowViewPlugin(BaseView):
pass
class AirflowMacroPlugin(object):
def __init__(self, namespace):
self.namespace = namespace
from airflow import operators
from airflow import hooks
from airflow import executors
from airflow import macros
from airflow import contrib
operators._integrate_plugins()
hooks._integrate_plugins()
executors._integrate_plugins()
macros._integrate_plugins() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2014 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.base;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.GwtCompatible;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.OptionalLong;
import org.jspecify.annotations.Nullable;
/**
* Helper functions that operate on any {@code Object}, and are not already provided in {@link
* Objects}.
*
* <p>See the Guava User Guide on <a
* href="https://github.com/google/guava/wiki/CommonObjectUtilitiesExplained">writing {@code Object}
* methods with {@code MoreObjects}</a>.
*
* @author Laurence Gonsalves
* @since 18.0 (since 2.0 as {@code Objects})
*/
@GwtCompatible
public final class MoreObjects {
/**
* Returns the first of two given parameters that is not {@code null}, if either is, or otherwise
* throws a {@link NullPointerException}.
*
* <p>To find the first non-null element in an iterable, use {@code Iterables.find(iterable,
* Predicates.notNull())}. For varargs, use {@code Iterables.find(Arrays.asList(a, b, c, ...),
* Predicates.notNull())}, static importing as necessary.
*
* <p><b>Note:</b> if {@code first} is represented as an {@link Optional}, this can be
* accomplished with {@link Optional#or(Object) first.or(second)}. That approach also allows for
* lazy evaluation of the fallback instance, using {@link Optional#or(Supplier)
* first.or(supplier)}.
*
* <p><b>Java 9 users:</b> use {@link Objects#requireNonNullElse} instead. For lazy evaluation of
* the fallback, use {@link Objects#requireNonNullElseGet Objects.requireNonNullElseGet(first, ()
* -> second)}}.
*
* @return {@code first} if it is non-null; otherwise {@code second} if it is non-null
* @throws NullPointerException if both {@code first} and {@code second} are null
* @since 18.0 (since 3.0 as {@code Objects.firstNonNull}).
*/
public static <T> T firstNonNull(@Nullable T first, @Nullable T second) {
if (first != null) {
return first;
}
if (second != null) {
return second;
}
throw new NullPointerException("Both parameters are null");
}
/**
* Creates an instance of {@link ToStringHelper}.
*
* <p>This is helpful for implementing {@link Object#toString()}. Specification by example:
*
* {@snippet :
* // Returns "ClassName{}"
* MoreObjects.toStringHelper(this)
* .toString();
*
* // Returns "ClassName{x=1}"
* MoreObjects.toStringHelper(this)
* .add("x", 1)
* .toString();
*
* // Returns "MyObject{x=1}"
* MoreObjects.toStringHelper("MyObject")
* .add("x", 1)
* .toString();
*
* // Returns "ClassName{x=1, y=foo}"
* MoreObjects.toStringHelper(this)
* .add("x", 1)
* .add("y", "foo")
* .toString();
*
* // Returns "ClassName{x=1}"
* MoreObjects.toStringHelper(this)
* .omitNullValues()
* .add("x", 1)
* .add("y", null)
* .toString();
* }
*
* <p>Note that in GWT, class names are often obfuscated.
*
* @param self the object to generate the string for (typically {@code this}), used only for its
* class name
* @since 18.0 (since 2.0 as {@code Objects.toStringHelper}).
*/
public static ToStringHelper toStringHelper(Object self) {
return new ToStringHelper(self.getClass().getSimpleName());
}
/**
* Creates an instance of {@link ToStringHelper} in the same manner as {@link
* #toStringHelper(Object)}, but using the simple name of {@code clazz} instead of using an
* instance's {@link Object#getClass()}.
*
* <p>Note that in GWT, class names are often obfuscated.
*
* @param clazz the {@link Class} of the instance
* @since 18.0 (since 7.0 as {@code Objects.toStringHelper}).
*/
public static ToStringHelper toStringHelper(Class<?> clazz) {
return new ToStringHelper(clazz.getSimpleName());
}
/**
* Creates an instance of {@link ToStringHelper} in the same manner as {@link
* #toStringHelper(Object)}, but using {@code className} instead of using an instance's {@link
* Object#getClass()}.
*
* @param className the name of the instance type
* @since 18.0 (since 7.0 as {@code Objects.toStringHelper}).
*/
public static ToStringHelper toStringHelper(String className) {
return new ToStringHelper(className);
}
/**
* Support class for {@link MoreObjects#toStringHelper}.
*
* @author Jason Lee
* @since 18.0 (since 2.0 as {@code Objects.ToStringHelper}).
*/
public static final class ToStringHelper {
private final String className;
private final ValueHolder holderHead = new ValueHolder();
private ValueHolder holderTail = holderHead;
private boolean omitNullValues = false;
private boolean omitEmptyValues = false;
/** Use {@link MoreObjects#toStringHelper(Object)} to create an instance. */
private ToStringHelper(String className) {
this.className = checkNotNull(className);
}
/**
* Configures the {@link ToStringHelper} so {@link #toString()} will ignore properties with null
* value. The order of calling this method, relative to the {@code add()}/{@code addValue()}
* methods, is not significant.
*
* @since 18.0 (since 12.0 as {@code Objects.ToStringHelper.omitNullValues}).
*/
@CanIgnoreReturnValue
public ToStringHelper omitNullValues() {
omitNullValues = true;
return this;
}
/**
* Configures the {@link ToStringHelper} so {@link #toString()} will ignore properties with
* empty values. The order of calling this method, relative to the {@code add()}/{@code
* addValue()} methods, is not significant.
*
* <p><b>Note:</b> in general, code should assume that the string form returned by {@code
* ToStringHelper} for a given object may change. In particular, the list of types which are
* checked for emptiness is subject to change. We currently check {@code CharSequence}s, {@code
* Collection}s, {@code Map}s, optionals (including Guava's), and arrays.
*
* @since 33.4.0
*/
@CanIgnoreReturnValue
public ToStringHelper omitEmptyValues() {
omitEmptyValues = true;
return this;
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format. If {@code value}
* is {@code null}, the string {@code "null"} is used, unless {@link #omitNullValues()} is
* called, in which case this name/value pair will not be added.
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, @Nullable Object value) {
return addHolder(name, value);
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, boolean value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, char value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, double value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, float value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, int value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds a name/value pair to the formatted output in {@code name=value} format.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.add}).
*/
@CanIgnoreReturnValue
public ToStringHelper add(String name, long value) {
return addUnconditionalHolder(name, String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, Object)} instead and give value a
* readable name.
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(@Nullable Object value) {
return addHolder(value);
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, boolean)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(boolean value) {
return addUnconditionalHolder(String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, char)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(char value) {
return addUnconditionalHolder(String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, double)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(double value) {
return addUnconditionalHolder(String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, float)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(float value) {
return addUnconditionalHolder(String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, int)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(int value) {
return addUnconditionalHolder(String.valueOf(value));
}
/**
* Adds an unnamed value to the formatted output.
*
* <p>It is strongly encouraged to use {@link #add(String, long)} instead and give value a
* readable name.
*
* @since 18.0 (since 11.0 as {@code Objects.ToStringHelper.addValue}).
*/
@CanIgnoreReturnValue
public ToStringHelper addValue(long value) {
return addUnconditionalHolder(String.valueOf(value));
}
private static boolean isEmpty(Object value) {
// Put types estimated to be the most frequent first.
if (value instanceof CharSequence) {
return ((CharSequence) value).length() == 0;
} else if (value instanceof Collection) {
return ((Collection<?>) value).isEmpty();
} else if (value instanceof Map) {
return ((Map<?, ?>) value).isEmpty();
} else if (value instanceof java.util.Optional) {
return !((java.util.Optional<?>) value).isPresent();
} else if (value instanceof OptionalInt) {
return !((OptionalInt) value).isPresent();
} else if (value instanceof OptionalLong) {
return !((OptionalLong) value).isPresent();
} else if (value instanceof OptionalDouble) {
return !((OptionalDouble) value).isPresent();
} else if (value instanceof Optional) {
return !((Optional) value).isPresent();
} else if (value.getClass().isArray()) {
return Array.getLength(value) == 0;
}
return false;
}
/**
* Returns a string in the format specified by {@link MoreObjects#toStringHelper(Object)}.
*
* <p>After calling this method, you can keep adding more properties to later call toString()
* again and get a more complete representation of the same object; but properties cannot be
* removed, so this only allows limited reuse of the helper instance. The helper allows
* duplication of properties (multiple name/value pairs with the same name can be added).
*/
@Override
public String toString() {
// create a copy to keep it consistent in case value changes
boolean omitNullValuesSnapshot = omitNullValues;
boolean omitEmptyValuesSnapshot = omitEmptyValues;
String nextSeparator = "";
StringBuilder builder = new StringBuilder(32).append(className).append('{');
for (ValueHolder valueHolder = holderHead.next;
valueHolder != null;
valueHolder = valueHolder.next) {
Object value = valueHolder.value;
if (valueHolder instanceof UnconditionalValueHolder
|| (value == null
? !omitNullValuesSnapshot
: (!omitEmptyValuesSnapshot || !isEmpty(value)))) {
builder.append(nextSeparator);
nextSeparator = ", ";
if (valueHolder.name != null) {
builder.append(valueHolder.name).append('=');
}
if (value != null && value.getClass().isArray()) {
Object[] objectArray = {value};
String arrayString = Arrays.deepToString(objectArray);
builder.append(arrayString, 1, arrayString.length() - 1);
} else {
builder.append(value);
}
}
}
return builder.append('}').toString();
}
private ValueHolder addHolder() {
ValueHolder valueHolder = new ValueHolder();
holderTail = holderTail.next = valueHolder;
return valueHolder;
}
@CanIgnoreReturnValue
private ToStringHelper addHolder(@Nullable Object value) {
ValueHolder valueHolder = addHolder();
valueHolder.value = value;
return this;
}
@CanIgnoreReturnValue
private ToStringHelper addHolder(String name, @Nullable Object value) {
ValueHolder valueHolder = addHolder();
valueHolder.value = value;
valueHolder.name = checkNotNull(name);
return this;
}
private UnconditionalValueHolder addUnconditionalHolder() {
UnconditionalValueHolder valueHolder = new UnconditionalValueHolder();
holderTail = holderTail.next = valueHolder;
return valueHolder;
}
@CanIgnoreReturnValue
private ToStringHelper addUnconditionalHolder(Object value) {
UnconditionalValueHolder valueHolder = addUnconditionalHolder();
valueHolder.value = value;
return this;
}
@CanIgnoreReturnValue
private ToStringHelper addUnconditionalHolder(String name, Object value) {
UnconditionalValueHolder valueHolder = addUnconditionalHolder();
valueHolder.value = value;
valueHolder.name = checkNotNull(name);
return this;
}
// Holder object for values that might be null and/or empty.
static class ValueHolder {
@Nullable String name;
@Nullable Object value;
@Nullable ValueHolder next;
}
/**
* Holder object for values that cannot be null or empty (will be printed unconditionally). This
* helps to shortcut most calls to isEmpty(), which is important because the check for emptiness
* is relatively expensive. Use a subtype so this also doesn't need any extra storage.
*/
private static final class UnconditionalValueHolder extends ValueHolder {}
}
private MoreObjects() {}
} | java | github | https://github.com/google/guava | guava/src/com/google/common/base/MoreObjects.java |
"""This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
"""
>>> 2+2
5
>>> 2+2
4
"""
def bar():
"""
>>> 2+2
4
"""
def test_silly_setup():
"""
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
True
"""
def w_blank():
"""
>>> if 1:
... print 'a'
... print
... print 'b'
a
<BLANKLINE>
b
"""
x = 1
def x_is_one():
"""
>>> x
1
"""
def y_is_one():
"""
>>> y
1
"""
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
import doctest
return doctest.DocTestSuite() | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* indexam.c
* general index access method routines
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/index/indexam.c
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
* index_close - close an index relation
* index_beginscan - start a scan of an index with amgettuple
* index_beginscan_bitmap - start a scan of an index with amgetbitmap
* index_rescan - restart a scan of an index
* index_endscan - end a scan
* index_insert - insert an index tuple into a relation
* index_markpos - mark a scan position
* index_restrpos - restore a scan position
* index_parallelscan_estimate - estimate shared memory for parallel scan
* index_parallelscan_initialize - initialize parallel scan
* index_parallelrescan - (re)start a parallel scan of an index
* index_beginscan_parallel - join parallel index scan
* index_getnext_tid - get the next TID from a scan
* index_fetch_heap - get the scan's next heap tuple
* index_getnext_slot - get the next tuple from a scan
* index_getbitmap - get all tuples from a scan
* index_bulk_delete - bulk deletion of index tuples
* index_vacuum_cleanup - post-deletion cleanup of an index
* index_can_return - does index support index-only scans?
* index_getprocid - get a support procedure OID
* index_getprocinfo - get a support procedure's lookup info
*
* NOTES
* This file contains the index_ routines which used
* to be a scattered collection of stuff in access/genam.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/amapi.h"
#include "access/relation.h"
#include "access/reloptions.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "catalog/index.h"
#include "catalog/pg_type.h"
#include "nodes/execnodes.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
/* ----------------------------------------------------------------
* macros used in index_ routines
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
* that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs, as well as prevent
* user errors like index expressions that access their own tables. The check
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
* ambuild to call its subsidiary routines through this file.
* ----------------------------------------------------------------
*/
#define RELATION_CHECKS \
do { \
	Assert(RelationIsValid(indexRelation)); \
	Assert(indexRelation->rd_indam); \
	if (unlikely(ReindexIsProcessingIndex(RelationGetRelid(indexRelation)))) \
		ereport(ERROR, \
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \
				 errmsg("cannot access index \"%s\" while it is being reindexed", \
						RelationGetRelationName(indexRelation)))); \
} while(0)

/*
 * Sanity checks for an already-open index scan: the scan descriptor must be
 * non-NULL and its index relation must be valid and have an access method
 * struct attached.  (Assert-only; compiled out in non-assert builds.)
 */
#define SCAN_CHECKS \
( \
	AssertMacro(scan), \
	AssertMacro(RelationIsValid(scan->indexRelation)), \
	AssertMacro(scan->indexRelation->rd_indam) \
)

/*
 * Error out if the index AM of "indexRelation" does not provide the
 * (optional) support function "pname".
 */
#define CHECK_REL_PROCEDURE(pname) \
do { \
	if (indexRelation->rd_indam->pname == NULL) \
		elog(ERROR, "function \"%s\" is not defined for index \"%s\"", \
			 CppAsString(pname), RelationGetRelationName(indexRelation)); \
} while(0)

/*
 * Same check as CHECK_REL_PROCEDURE, but the index relation is taken from
 * an open scan descriptor ("scan").
 */
#define CHECK_SCAN_PROCEDURE(pname) \
do { \
	if (scan->indexRelation->rd_indam->pname == NULL) \
		elog(ERROR, "function \"%s\" is not defined for index \"%s\"", \
			 CppAsString(pname), RelationGetRelationName(scan->indexRelation)); \
} while(0)

/* Forward declarations for local routines */
static IndexScanDesc index_beginscan_internal(Relation indexRelation,
											  int nkeys, int norderbys, Snapshot snapshot,
											  ParallelIndexScanDesc pscan, bool temp_snap);
static inline void validate_relation_kind(Relation r);
/* ----------------------------------------------------------------
* index_ interface functions
* ----------------------------------------------------------------
*/
/* ----------------
 *		index_open - open an index relation by relation OID
 *
 *		Unless lockmode is "NoLock", the indicated kind of lock is
 *		acquired on the index.  (NoLock should generally be used only
 *		when the caller already holds a suitable lock on the index.)
 *
 *		Raises an error if the relation does not exist, or if it exists
 *		but is not an index (see validate_relation_kind).
 *
 *		This is a convenience wrapper for indexscan use; callers may
 *		also use relation_open directly.
 * ----------------
 */
Relation
index_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	rel = relation_open(relationId, lockmode);

	/* Reject anything that isn't an index or partitioned index. */
	validate_relation_kind(rel);

	return rel;
}
/* ----------------
 *		try_index_open - open an index relation by relation OID
 *
 *		Identical to index_open, except that a nonexistent relation
 *		yields NULL rather than an error.
 * ----------------
 */
Relation
try_index_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	rel = try_relation_open(relationId, lockmode);

	/* Relation doesn't exist: report that to the caller. */
	if (rel == NULL)
		return NULL;

	/* It exists, but make sure it really is an index. */
	validate_relation_kind(rel);

	return rel;
}
/* ----------------
 *		index_close - close an index relation
 *
 *		If lockmode is not "NoLock", the specified lock is released
 *		after the relcache reference is dropped.
 *
 *		It is often sensible to hold the lock beyond index_close; in
 *		that case the lock is released automatically at transaction end.
 * ----------------
 */
void
index_close(Relation relation, LOCKMODE lockmode)
{
	/*
	 * Copy the lock identifier first: once we drop our relcache reference
	 * below, the Relation struct may no longer be safe to dereference.
	 */
	LockRelId	lockrelid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&lockrelid, lockmode);
}
/* ----------------
 *		validate_relation_kind - check the relation's kind
 *
 *		Errors out unless the relation is an index or a partitioned
 *		index.
 * ----------------
 */
static inline void
validate_relation_kind(Relation r)
{
	char		relkind = r->rd_rel->relkind;

	/* Both plain and partitioned indexes are acceptable. */
	if (relkind == RELKIND_INDEX || relkind == RELKIND_PARTITIONED_INDEX)
		return;

	ereport(ERROR,
			(errcode(ERRCODE_WRONG_OBJECT_TYPE),
			 errmsg("\"%s\" is not an index",
					RelationGetRelationName(r))));
}
/* ----------------
 *		index_insert - insert an index tuple into a relation
 *
 *		Hands the new entry off to the index AM's aminsert routine and
 *		returns its result.  If the AM does not manage its own predicate
 *		locks, a serializable-conflict check is performed on the whole
 *		index first.
 * ----------------
 */
bool
index_insert(Relation indexRelation,
			 Datum *values,
			 bool *isnull,
			 ItemPointer heap_t_ctid,
			 Relation heapRelation,
			 IndexUniqueCheck checkUnique,
			 bool indexUnchanged,
			 IndexInfo *indexInfo)
{
	IndexAmRoutine *amroutine;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(aminsert);

	amroutine = indexRelation->rd_indam;

	/*
	 * AMs that don't take fine-grained predicate locks need a conflict
	 * check covering the whole index.
	 */
	if (!amroutine->ampredlocks)
		CheckForSerializableConflictIn(indexRelation,
									   (ItemPointer) NULL,
									   InvalidBlockNumber);

	return amroutine->aminsert(indexRelation, values, isnull,
							   heap_t_ctid, heapRelation,
							   checkUnique, indexUnchanged,
							   indexInfo);
}
/* -------------------------
 *		index_insert_cleanup - clean up after all index inserts are done
 * -------------------------
 */
void
index_insert_cleanup(Relation indexRelation,
                     IndexInfo *indexInfo)
{
    RELATION_CHECKS;

    /* aminsertcleanup is optional; skip the call if the AM lacks it */
    if (indexRelation->rd_indam->aminsertcleanup)
        indexRelation->rd_indam->aminsertcleanup(indexRelation, indexInfo);
}
/*
 * index_beginscan - start a scan of an index with amgettuple
 *
 * Caller must be holding suitable locks on the heap and the index.
 *
 * snapshot determines tuple visibility for the heap fetches; nkeys and
 * norderbys declare how many scan keys / ordering operators will later be
 * supplied via index_rescan.  instrument is stored into the scan descriptor
 * for the executor's use.
 */
IndexScanDesc
index_beginscan(Relation heapRelation,
                Relation indexRelation,
                Snapshot snapshot,
                IndexScanInstrumentation *instrument,
                int nkeys, int norderbys)
{
    IndexScanDesc scan;

    Assert(snapshot != InvalidSnapshot);

    /* Check that a historic snapshot is not used for non-catalog tables */
    if (IsHistoricMVCCSnapshot(snapshot) &&
        !RelationIsAccessibleInLogicalDecoding(heapRelation))
    {
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
                 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
                        RelationGetRelationName(heapRelation))));
    }

    scan = index_beginscan_internal(indexRelation, nkeys, norderbys, snapshot, NULL, false);

    /*
     * Save additional parameters into the scandesc.  Everything else was set
     * up by RelationGetIndexScan.
     */
    scan->heapRelation = heapRelation;
    scan->xs_snapshot = snapshot;
    scan->instrument = instrument;

    /* prepare to fetch index matches from table */
    scan->xs_heapfetch = table_index_fetch_begin(heapRelation);

    return scan;
}
/*
 * index_beginscan_bitmap - start a scan of an index with amgetbitmap
 *
 * As above, caller had better be holding some lock on the parent heap
 * relation, even though it's not explicitly mentioned here.
 *
 * Unlike index_beginscan, no heap-fetch state is set up: bitmap scans
 * visit the heap separately, so only the snapshot and instrumentation
 * are recorded here.
 */
IndexScanDesc
index_beginscan_bitmap(Relation indexRelation,
                       Snapshot snapshot,
                       IndexScanInstrumentation *instrument,
                       int nkeys)
{
    IndexScanDesc scan;

    Assert(snapshot != InvalidSnapshot);

    /* norderbys is always zero for bitmap scans */
    scan = index_beginscan_internal(indexRelation, nkeys, 0, snapshot, NULL, false);

    /*
     * Save additional parameters into the scandesc.  Everything else was set
     * up by RelationGetIndexScan.
     */
    scan->xs_snapshot = snapshot;
    scan->instrument = instrument;

    return scan;
}
/*
 * index_beginscan_internal --- common code for index_beginscan variants
 *
 * pscan is non-NULL only when joining a parallel scan; temp_snap tells
 * index_endscan whether the snapshot must be unregistered when the scan
 * is torn down.
 */
static IndexScanDesc
index_beginscan_internal(Relation indexRelation,
                         int nkeys, int norderbys, Snapshot snapshot,
                         ParallelIndexScanDesc pscan, bool temp_snap)
{
    IndexScanDesc scan;

    RELATION_CHECKS;
    CHECK_REL_PROCEDURE(ambeginscan);

    /* AMs without fine-grained predicate locks get a relation-level one */
    if (!(indexRelation->rd_indam->ampredlocks))
        PredicateLockRelation(indexRelation, snapshot);

    /*
     * We hold a reference count to the relcache entry throughout the scan.
     */
    RelationIncrementReferenceCount(indexRelation);

    /*
     * Tell the AM to open a scan.
     */
    scan = indexRelation->rd_indam->ambeginscan(indexRelation, nkeys,
                                                norderbys);
    /* Initialize information for parallel scan. */
    scan->parallel_scan = pscan;
    scan->xs_temp_snap = temp_snap;

    return scan;
}
/* ----------------
 *		index_rescan - (re)start a scan of an index
 *
 * During a restart, the caller may specify a new set of scankeys and/or
 * orderbykeys; but the number of keys cannot differ from what index_beginscan
 * was told.  (Later we might relax that to "must not exceed", but currently
 * the index AMs tend to assume that scan->numberOfKeys is what to believe.)
 * To restart the scan without changing keys, pass NULL for the key arrays.
 * (Of course, keys *must* be passed on the first call, unless
 * scan->numberOfKeys is zero.)
 * ----------------
 */
void
index_rescan(IndexScanDesc scan,
             ScanKey keys, int nkeys,
             ScanKey orderbys, int norderbys)
{
    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(amrescan);

    /* Key counts must match what index_beginscan was told (see above) */
    Assert(nkeys == scan->numberOfKeys);
    Assert(norderbys == scan->numberOfOrderBys);

    /* Release resources (like buffer pins) from table accesses */
    if (scan->xs_heapfetch)
        table_index_fetch_reset(scan->xs_heapfetch);

    /* Forget any pending kill request or HOT-chain continuation */
    scan->kill_prior_tuple = false; /* for safety */
    scan->xs_heap_continue = false;

    scan->indexRelation->rd_indam->amrescan(scan, keys, nkeys,
                                            orderbys, norderbys);
}
/* ----------------
 *		index_endscan - end a scan
 *
 * Tears down the scan in the reverse order of setup: heap-fetch state,
 * the AM's private state, the relcache reference, a temporary snapshot
 * (if index_beginscan_parallel registered one), then the descriptor.
 * ----------------
 */
void
index_endscan(IndexScanDesc scan)
{
    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(amendscan);

    /* Release resources (like buffer pins) from table accesses */
    if (scan->xs_heapfetch)
    {
        table_index_fetch_end(scan->xs_heapfetch);
        scan->xs_heapfetch = NULL;
    }

    /* End the AM's scan */
    scan->indexRelation->rd_indam->amendscan(scan);

    /* Release index refcount acquired by index_beginscan */
    RelationDecrementReferenceCount(scan->indexRelation);

    /* Snapshot was registered on our behalf only for parallel scans */
    if (scan->xs_temp_snap)
        UnregisterSnapshot(scan->xs_snapshot);

    /* Release the scan data structure itself */
    IndexScanEnd(scan);
}
/* ----------------
 *		index_markpos - mark a scan position
 *
 *		Simply delegates to the access method's ammarkpos callback.
 * ----------------
 */
void
index_markpos(IndexScanDesc scan)
{
    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(ammarkpos);

    scan->indexRelation->rd_indam->ammarkpos(scan);
}
/* ----------------
 *		index_restrpos - restore a scan position
 *
 * NOTE: this only restores the internal scan state of the index AM.  See
 * comments for ExecRestrPos().
 *
 * NOTE: For heap, in the presence of HOT chains, mark/restore only works
 * correctly if the scan's snapshot is MVCC-safe; that ensures that there's at
 * most one returnable tuple in each HOT chain, and so restoring the prior
 * state at the granularity of the index AM is sufficient.  Since the only
 * current user of mark/restore functionality is nodeMergejoin.c, this
 * effectively means that merge-join plans only work for MVCC snapshots.  This
 * could be fixed if necessary, but for now it seems unimportant.
 * ----------------
 */
void
index_restrpos(IndexScanDesc scan)
{
    /* Enforce the MVCC-snapshot requirement described above */
    Assert(IsMVCCSnapshot(scan->xs_snapshot));

    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(amrestrpos);

    /* release resources (like buffer pins) from table accesses */
    if (scan->xs_heapfetch)
        table_index_fetch_reset(scan->xs_heapfetch);

    /* Forget any pending kill request or HOT-chain continuation */
    scan->kill_prior_tuple = false; /* for safety */
    scan->xs_heap_continue = false;

    scan->indexRelation->rd_indam->amrestrpos(scan);
}
/*
 * index_parallelscan_estimate - estimate shared memory for parallel scan
 *
 * When instrument=true, estimate includes SharedIndexScanInstrumentation
 * space.  When parallel_aware=true, estimate includes whatever space the
 * index AM's amestimateparallelscan routine requested when called.
 *
 * The layout mirrors index_parallelscan_initialize: fixed descriptor header,
 * serialized snapshot, optional instrumentation array (one slot per worker),
 * optional AM-specific area; each part is MAXALIGN'd.
 */
Size
index_parallelscan_estimate(Relation indexRelation, int nkeys, int norderbys,
                            Snapshot snapshot, bool instrument,
                            bool parallel_aware, int nworkers)
{
    Size        nbytes;

    /* Caller must want at least one of the two optional areas */
    Assert(instrument || parallel_aware);

    RELATION_CHECKS;

    /* Fixed header plus the serialized snapshot */
    nbytes = offsetof(ParallelIndexScanDescData, ps_snapshot_data);
    nbytes = add_size(nbytes, EstimateSnapshotSpace(snapshot));
    nbytes = MAXALIGN(nbytes);

    if (instrument)
    {
        Size        sharedinfosz;

        /* Header plus a per-worker instrumentation slot */
        sharedinfosz = offsetof(SharedIndexScanInstrumentation, winstrument) +
            nworkers * sizeof(IndexScanInstrumentation);
        nbytes = add_size(nbytes, sharedinfosz);
        nbytes = MAXALIGN(nbytes);
    }

    /*
     * If parallel scan index AM interface can't be used (or index AM provides
     * no such interface), assume there is no AM-specific data needed
     */
    if (parallel_aware &&
        indexRelation->rd_indam->amestimateparallelscan != NULL)
        nbytes = add_size(nbytes,
                          indexRelation->rd_indam->amestimateparallelscan(indexRelation,
                                                                          nkeys,
                                                                          norderbys));

    return nbytes;
}
/*
 * index_parallelscan_initialize - initialize parallel scan
 *
 * We initialize both the ParallelIndexScanDesc proper and the AM-specific
 * information which follows it.
 *
 * This function calls access method specific initialization routine to
 * initialize am specific information.  Call this just once in the leader
 * process; then, individual workers attach via index_beginscan_parallel.
 *
 * target points at shared memory sized per index_parallelscan_estimate;
 * on return, *sharedinfo points at the zero-initialized instrumentation
 * area within it (only set when instrument=true).
 */
void
index_parallelscan_initialize(Relation heapRelation, Relation indexRelation,
                              Snapshot snapshot, bool instrument,
                              bool parallel_aware, int nworkers,
                              SharedIndexScanInstrumentation **sharedinfo,
                              ParallelIndexScanDesc target)
{
    Size        offset;

    /* Caller must want at least one of the two optional areas */
    Assert(instrument || parallel_aware);

    RELATION_CHECKS;

    /* Running offset past the fixed header and serialized snapshot */
    offset = add_size(offsetof(ParallelIndexScanDescData, ps_snapshot_data),
                      EstimateSnapshotSpace(snapshot));
    offset = MAXALIGN(offset);

    target->ps_locator = heapRelation->rd_locator;
    target->ps_indexlocator = indexRelation->rd_locator;
    /* Zero offsets mean "area not present" until filled in below */
    target->ps_offset_ins = 0;
    target->ps_offset_am = 0;
    SerializeSnapshot(snapshot, target->ps_snapshot_data);

    if (instrument)
    {
        Size        sharedinfosz;

        target->ps_offset_ins = offset;
        sharedinfosz = offsetof(SharedIndexScanInstrumentation, winstrument) +
            nworkers * sizeof(IndexScanInstrumentation);
        offset = add_size(offset, sharedinfosz);
        offset = MAXALIGN(offset);

        /* Set leader's *sharedinfo pointer, and initialize stats */
        *sharedinfo = (SharedIndexScanInstrumentation *)
            OffsetToPointer(target, target->ps_offset_ins);
        memset(*sharedinfo, 0, sharedinfosz);
        (*sharedinfo)->num_workers = nworkers;
    }

    /* aminitparallelscan is optional; assume no-op if not provided by AM */
    if (parallel_aware && indexRelation->rd_indam->aminitparallelscan != NULL)
    {
        void       *amtarget;

        target->ps_offset_am = offset;
        amtarget = OffsetToPointer(target, target->ps_offset_am);
        indexRelation->rd_indam->aminitparallelscan(amtarget);
    }
}
/* ----------------
 *		index_parallelrescan - (re)start a parallel scan of an index
 * ----------------
 */
void
index_parallelrescan(IndexScanDesc scan)
{
    SCAN_CHECKS;

    /* Release resources (like buffer pins) from table accesses */
    if (scan->xs_heapfetch)
        table_index_fetch_reset(scan->xs_heapfetch);

    /* amparallelrescan is optional; assume no-op if not provided by AM */
    if (scan->indexRelation->rd_indam->amparallelrescan != NULL)
        scan->indexRelation->rd_indam->amparallelrescan(scan);
}
/*
 * index_beginscan_parallel - join parallel index scan
 *
 * Caller must be holding suitable locks on the heap and the index.
 *
 * Restores the snapshot the leader serialized into pscan and registers it;
 * the matching UnregisterSnapshot happens in index_endscan because
 * temp_snap=true is passed below.
 */
IndexScanDesc
index_beginscan_parallel(Relation heaprel, Relation indexrel,
                         IndexScanInstrumentation *instrument,
                         int nkeys, int norderbys,
                         ParallelIndexScanDesc pscan)
{
    Snapshot    snapshot;
    IndexScanDesc scan;

    /* Shared descriptor must refer to the same relations we were given */
    Assert(RelFileLocatorEquals(heaprel->rd_locator, pscan->ps_locator));
    Assert(RelFileLocatorEquals(indexrel->rd_locator, pscan->ps_indexlocator));

    snapshot = RestoreSnapshot(pscan->ps_snapshot_data);
    RegisterSnapshot(snapshot);
    scan = index_beginscan_internal(indexrel, nkeys, norderbys, snapshot,
                                    pscan, true);

    /*
     * Save additional parameters into the scandesc.  Everything else was set
     * up by index_beginscan_internal.
     */
    scan->heapRelation = heaprel;
    scan->xs_snapshot = snapshot;
    scan->instrument = instrument;

    /* prepare to fetch index matches from table */
    scan->xs_heapfetch = table_index_fetch_begin(heaprel);

    return scan;
}
/* ----------------
 *		index_getnext_tid - get the next TID from a scan
 *
 * The result is the next TID satisfying the scan keys,
 * or NULL if no more matching tuples exist.
 *
 * The returned pointer refers to scan->xs_heaptid and is only valid until
 * the next call on this scan.
 * ----------------
 */
ItemPointer
index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
{
    bool        found;

    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(amgettuple);

    /* XXX: we should assert that a snapshot is pushed or registered */
    Assert(TransactionIdIsValid(RecentXmin));

    /*
     * The AM's amgettuple proc finds the next index entry matching the scan
     * keys, and puts the TID into scan->xs_heaptid.  It should also set
     * scan->xs_recheck and possibly scan->xs_itup/scan->xs_hitup, though we
     * pay no attention to those fields here.
     */
    found = scan->indexRelation->rd_indam->amgettuple(scan, direction);

    /* Reset kill flag immediately for safety */
    scan->kill_prior_tuple = false;
    scan->xs_heap_continue = false;

    /* If we're out of index entries, we're done */
    if (!found)
    {
        /* release resources (like buffer pins) from table accesses */
        if (scan->xs_heapfetch)
            table_index_fetch_reset(scan->xs_heapfetch);

        return NULL;
    }
    Assert(ItemPointerIsValid(&scan->xs_heaptid));

    /* Count one index tuple returned for the statistics collector */
    pgstat_count_index_tuples(scan->indexRelation, 1);

    /* Return the TID of the tuple we found. */
    return &scan->xs_heaptid;
}
/* ----------------
 *		index_fetch_heap - get the scan's next heap tuple
 *
 * The result is a visible heap tuple associated with the index TID most
 * recently fetched by index_getnext_tid, or NULL if no more matching tuples
 * exist.  (There can be more than one matching tuple because of HOT chains,
 * although when using an MVCC snapshot it should be impossible for more than
 * one such tuple to exist.)
 *
 * On success, the buffer containing the heap tup is pinned (the pin will be
 * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
bool
index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot)
{
    bool        all_dead = false;
    bool        found;

    /* Fetch the heap tuple visible under the scan's snapshot, if any */
    found = table_index_fetch_tuple(scan->xs_heapfetch, &scan->xs_heaptid,
                                    scan->xs_snapshot, slot,
                                    &scan->xs_heap_continue, &all_dead);

    if (found)
        pgstat_count_heap_fetch(scan->indexRelation);

    /*
     * If we scanned a whole HOT chain and found only dead tuples, tell index
     * AM to kill its entry for that TID (this will take effect in the next
     * amgettuple call, in index_getnext_tid).  We do not do this when in
     * recovery because it may violate MVCC to do so.  See comments in
     * RelationGetIndexScan().
     */
    if (!scan->xactStartedInRecovery)
        scan->kill_prior_tuple = all_dead;

    return found;
}
/* ----------------
 *		index_getnext_slot - get the next tuple from a scan
 *
 * The result is true if a tuple satisfying the scan keys and the snapshot was
 * found, false otherwise.  The tuple is stored in the specified slot.
 *
 * On success, resources (like buffer pins) are likely to be held, and will be
 * dropped by a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
bool
index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
{
    for (;;)
    {
        /* xs_heap_continue means more tuples remain in the current HOT chain */
        if (!scan->xs_heap_continue)
        {
            ItemPointer tid;

            /* Time to fetch the next TID from the index */
            tid = index_getnext_tid(scan, direction);

            /* If we're out of index entries, we're done */
            if (tid == NULL)
                break;

            Assert(ItemPointerEquals(tid, &scan->xs_heaptid));
        }

        /*
         * Fetch the next (or only) visible heap tuple for this index entry.
         * If we don't find anything, loop around and grab the next TID from
         * the index.
         */
        Assert(ItemPointerIsValid(&scan->xs_heaptid));
        if (index_fetch_heap(scan, slot))
            return true;
    }

    return false;
}
/* ----------------
 *		index_getbitmap - get all tuples at once from an index scan
 *
 * Adds the TIDs of all heap tuples satisfying the scan keys to a bitmap.
 * Since there's no interlock between the index scan and the eventual heap
 * access, this is only safe to use with MVCC-based snapshots: the heap
 * item slot could have been replaced by a newer tuple by the time we get
 * to it.
 *
 * Returns the number of matching tuples found.  (Note: this might be only
 * approximate, so it should only be used for statistical purposes.)
 * ----------------
 */
int64
index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
{
    int64       ntids;

    SCAN_CHECKS;
    CHECK_SCAN_PROCEDURE(amgetbitmap);

    /* just make sure this is false... */
    scan->kill_prior_tuple = false;

    /*
     * have the am's getbitmap proc do all the work.
     */
    ntids = scan->indexRelation->rd_indam->amgetbitmap(scan, bitmap);

    /* Feed the (possibly approximate) tuple count to the stats machinery */
    pgstat_count_index_tuples(scan->indexRelation, ntids);

    return ntids;
}
/* ----------------
 *		index_bulk_delete - do mass deletion of index entries
 *
 *		callback routine tells whether a given main-heap tuple is
 *		to be deleted
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_bulk_delete(IndexVacuumInfo *info,
                  IndexBulkDeleteResult *istat,
                  IndexBulkDeleteCallback callback,
                  void *callback_state)
{
    /* local name required by the RELATION_CHECKS macros below */
    Relation    indexRelation = info->index;

    RELATION_CHECKS;
    CHECK_REL_PROCEDURE(ambulkdelete);

    return indexRelation->rd_indam->ambulkdelete(info, istat,
                                                 callback, callback_state);
}
/* ----------------
 *		index_vacuum_cleanup - do post-deletion cleanup of an index
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_vacuum_cleanup(IndexVacuumInfo *info,
                     IndexBulkDeleteResult *istat)
{
    /* local name required by the RELATION_CHECKS macros below */
    Relation    indexRelation = info->index;

    RELATION_CHECKS;
    CHECK_REL_PROCEDURE(amvacuumcleanup);

    return indexRelation->rd_indam->amvacuumcleanup(info, istat);
}
/* ----------------
 *		index_can_return
 *
 *		Does the index access method support index-only scans for the given
 *		column?
 * ----------------
 */
bool
index_can_return(Relation indexRelation, int attno)
{
    RELATION_CHECKS;

    /* amcanreturn is optional; an AM without it supports no index-only scans */
    return indexRelation->rd_indam->amcanreturn != NULL &&
        indexRelation->rd_indam->amcanreturn(indexRelation, attno);
}
/* ----------------
 *		index_getprocid
 *
 *		Index access methods typically require support routines that are
 *		not directly the implementation of any WHERE-clause query operator
 *		and so cannot be kept in pg_amop.  Instead, such routines are kept
 *		in pg_amproc.  These registered procedure OIDs are assigned numbers
 *		according to a convention established by the access method.
 *		The general index code doesn't know anything about the routines
 *		involved; it just builds an ordered list of them for
 *		each attribute on which an index is defined.
 *
 *		As of Postgres 8.3, support routines within an operator family
 *		are further subdivided by the "left type" and "right type" of the
 *		query operator(s) that they support.  The "default" functions for a
 *		particular indexed attribute are those with both types equal to
 *		the index opclass' opcintype (note that this is subtly different
 *		from the indexed attribute's own type: it may be a binary-compatible
 *		type instead).  Only the default functions are stored in relcache
 *		entries --- access methods can use the syscache to look up non-default
 *		functions.
 *
 *		This routine returns the requested default procedure OID for a
 *		particular indexed attribute.
 * ----------------
 */
RegProcedure
index_getprocid(Relation irel,
                AttrNumber attnum,
                uint16 procnum)
{
    int         nproc = irel->rd_indam->amsupport;
    RegProcedure *loc = irel->rd_support;
    int         procindex;

    Assert(procnum > 0 && procnum <= (uint16) nproc);
    Assert(loc != NULL);

    /* rd_support holds nproc slots per attribute, in attribute order */
    procindex = (nproc * (attnum - 1)) + (procnum - 1);

    return loc[procindex];
}
/* ----------------
 *		index_getprocinfo
 *
 *		This routine allows index AMs to keep fmgr lookup info for
 *		support procs in the relcache.  As above, only the "default"
 *		functions for any particular indexed attribute are cached.
 *
 * Note: the return value points into cached data that will be lost during
 * any relcache rebuild!  Therefore, either use the callinfo right away,
 * or save it only after having acquired some type of lock on the index rel.
 * ----------------
 */
FmgrInfo *
index_getprocinfo(Relation irel,
                  AttrNumber attnum,
                  uint16 procnum)
{
    FmgrInfo   *locinfo;
    int         nproc;
    int         optsproc;
    int         procindex;

    nproc = irel->rd_indam->amsupport;
    optsproc = irel->rd_indam->amoptsprocnum;

    Assert(procnum > 0 && procnum <= (uint16) nproc);

    /* rd_supportinfo holds nproc slots per attribute, in attribute order */
    procindex = (nproc * (attnum - 1)) + (procnum - 1);

    locinfo = irel->rd_supportinfo;

    Assert(locinfo != NULL);

    locinfo += procindex;

    /* Initialize the lookup info if first time through */
    if (locinfo->fn_oid == InvalidOid)
    {
        RegProcedure *loc = irel->rd_support;
        RegProcedure procId;

        Assert(loc != NULL);
        procId = loc[procindex];

        /*
         * Complain if function was not found during IndexSupportInitialize.
         * This should not happen unless the system tables contain bogus
         * entries for the index opclass.  (If an AM wants to allow a support
         * function to be optional, it can use index_getprocid.)
         */
        if (!RegProcedureIsValid(procId))
            elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
                 procnum, attnum, RelationGetRelationName(irel));

        /* Cache the lookup in the index's own memory context */
        fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);

        if (procnum != optsproc)
        {
            /* Initialize locinfo->fn_expr with opclass options Const */
            bytea     **attoptions = RelationGetIndexAttOptions(irel, false);
            MemoryContext oldcxt = MemoryContextSwitchTo(irel->rd_indexcxt);

            set_fn_opclass_options(locinfo, attoptions[attnum - 1]);
            MemoryContextSwitchTo(oldcxt);
        }
    }

    return locinfo;
}
/* ----------------
 *		index_store_float8_orderby_distances
 *
 *		Convert AM distance function's results (that can be inexact)
 *		to ORDER BY types and save them into xs_orderbyvals/xs_orderbynulls
 *		for a possible recheck.
 *
 *		distances may be NULL only when recheckOrderBy is false; a NULL
 *		array (or a null entry) stores SQL NULL for that ORDER BY column.
 * ----------------
 */
void
index_store_float8_orderby_distances(IndexScanDesc scan, Oid *orderByTypes,
                                     IndexOrderByDistance *distances,
                                     bool recheckOrderBy)
{
    int         i;

    Assert(distances || !recheckOrderBy);

    scan->xs_recheckorderby = recheckOrderBy;

    for (i = 0; i < scan->numberOfOrderBys; i++)
    {
        if (orderByTypes[i] == FLOAT8OID)
        {
            /* float8 needs no conversion; store as-is or as NULL */
            if (distances && !distances[i].isnull)
            {
                scan->xs_orderbyvals[i] = Float8GetDatum(distances[i].value);
                scan->xs_orderbynulls[i] = false;
            }
            else
            {
                scan->xs_orderbyvals[i] = (Datum) 0;
                scan->xs_orderbynulls[i] = true;
            }
        }
        else if (orderByTypes[i] == FLOAT4OID)
        {
            /* convert distance function's result to ORDER BY type */
            if (distances && !distances[i].isnull)
            {
                scan->xs_orderbyvals[i] = Float4GetDatum((float4) distances[i].value);
                scan->xs_orderbynulls[i] = false;
            }
            else
            {
                scan->xs_orderbyvals[i] = (Datum) 0;
                scan->xs_orderbynulls[i] = true;
            }
        }
        else
        {
            /*
             * If the ordering operator's return value is anything else, we
             * don't know how to convert the float8 bound calculated by the
             * distance function to that.  The executor won't actually need
             * the order by values we return here, if there are no lossy
             * results, so only insist on converting if the *recheck flag is
             * set.
             */
            if (scan->xs_recheckorderby)
                elog(ERROR, "ORDER BY operator must return float8 or float4 if the distance function is lossy");
            scan->xs_orderbynulls[i] = true;
        }
    }
}
/* ----------------
 *		index_opclass_options
 *
 *		Parse opclass-specific options for index column.
 *
 *		Returns the parsed options as a palloc'd bytea, or NULL when
 *		neither options nor an options-parsing procedure exist.  Raises
 *		an error if options were supplied but the opclass defines no
 *		options procedure.
 * ----------------
 */
bytea *
index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions,
                      bool validate)
{
    int         amoptsprocnum = indrel->rd_indam->amoptsprocnum;
    Oid         procid = InvalidOid;
    FmgrInfo   *procinfo;
    local_relopts relopts;

    /* fetch options support procedure if specified */
    if (amoptsprocnum != 0)
        procid = index_getprocid(indrel, attnum, amoptsprocnum);

    if (!OidIsValid(procid))
    {
        Oid         opclass;
        Datum       indclassDatum;
        oidvector  *indclass;

        if (!DatumGetPointer(attoptions))
            return NULL;        /* ok, no options, no procedure */

        /*
         * Report an error if the opclass's options-parsing procedure does not
         * exist but the opclass options are specified.
         */
        indclassDatum = SysCacheGetAttrNotNull(INDEXRELID, indrel->rd_indextuple,
                                               Anum_pg_index_indclass);
        indclass = (oidvector *) DatumGetPointer(indclassDatum);
        opclass = indclass->values[attnum - 1];

        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("operator class %s has no options",
                        generate_opclass_name(opclass))));
    }

    /* Let the options procedure declare the option set, then parse */
    init_local_reloptions(&relopts, 0);
    procinfo = index_getprocinfo(indrel, attnum, amoptsprocnum);
    (void) FunctionCall1(procinfo, PointerGetDatum(&relopts));

    return build_local_reloptions(&relopts, attoptions, validate);
}
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/davicom,dm9051.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Davicom DM9051 SPI Ethernet Controller
maintainers:
- Joseph CHANG <josright123@gmail.com>
description: |
The DM9051 is a fully integrated and cost-effective low pin count single
chip Fast Ethernet controller with a Serial Peripheral Interface (SPI).
allOf:
- $ref: ethernet-controller.yaml#
properties:
compatible:
const: davicom,dm9051
reg:
maxItems: 1
spi-max-frequency:
maximum: 45000000
interrupts:
maxItems: 1
local-mac-address: true
mac-address: true
required:
- compatible
- reg
- spi-max-frequency
- interrupts
additionalProperties: false
examples:
# Raspberry Pi platform
- |
/* for Raspberry Pi with pin control stuff for GPIO irq */
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
ethernet@0 {
compatible = "davicom,dm9051";
reg = <0>; /* spi chip select */
local-mac-address = [00 00 00 00 00 00];
interrupt-parent = <&gpio>;
interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
spi-max-frequency = <31200000>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/net/davicom,dm9051.yaml |
'''
Copyright 2016 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
Zinc Field List Editor Widget
Allows a Zinc Field object to be created/edited in Qt / Python.
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from PySide import QtCore, QtGui
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.zinc.field import Field
from opencmiss.neon.ui.editors.ui_fieldlisteditorwidget import Ui_FieldListEditorWidget
class FieldListEditorWidget(QtGui.QWidget):
    """Widget listing the fields of a Zinc fieldmodule.

    Displays all fields of the current region's fieldmodule in a list view,
    and lets the user select, rename, add and edit fields through the
    embedded field editor widget.
    """

    def __init__(self, parent=None):
        """Initialise the widget and build its Qt UI."""
        QtGui.QWidget.__init__(self, parent)
        self._fieldmodule = None
        # Using composition to include the visual element of the GUI.
        self.ui = Ui_FieldListEditorWidget()
        self._fieldItems = None  # QStandardItemModel backing the list view
        self._neonRegion = None
        self._timekeeper = None
        # BUG FIX: previously never initialised here; only assigned in
        # setFieldmodule, so reading it before that call raised AttributeError.
        self._fieldmodulenotifier = None
        self.ui.setupUi(self)
        self._makeConnections()
        self._field = None

    @QtCore.Slot(Field, str)
    def editorCreateField(self, field, fieldType):
        """Record the type of a field created by the editor and select it."""
        self._neonRegion.addFieldTypeToDict(field, fieldType)
        self.setField(field)

    def _makeConnections(self):
        """Wire UI signals to their handlers."""
        self.ui.field_listview.clicked.connect(self.fieldListItemClicked)
        self.ui.addFieldButton.clicked.connect(self.addFieldClicked)
        self.ui.field_editor._fieldCreated.connect(self.editorCreateField)

    def getFieldmodule(self):
        """Return the fieldmodule currently in the editor (may be None)."""
        return self._fieldmodule

    def _fieldmoduleCallback(self, fieldmoduleevent):
        """Handle a change in fields; may need to rebuild the field list."""
        changeSummary = fieldmoduleevent.getSummaryFieldChangeFlags()
        # Only rebuild when fields were added, removed or renamed.
        if 0 != (changeSummary & (Field.CHANGE_FLAG_IDENTIFIER | Field.CHANGE_FLAG_ADD | Field.CHANGE_FLAG_REMOVE)):
            self._buildFieldsList()

    def setTimekeeper(self, timekeeper):
        """Set the timekeeper used by the field editor (None to clear)."""
        if not (timekeeper and timekeeper.isValid()):
            self._timekeeper = None
        else:
            self._timekeeper = timekeeper
        if self._timekeeper:
            self.ui.field_editor.setTimekeeper(self._timekeeper)

    def setFieldmodule(self, fieldmodule):
        """Set the fieldmodule whose fields are listed (None to clear)."""
        if not (fieldmodule and fieldmodule.isValid()):
            self._fieldmodule = None
        else:
            self._fieldmodule = fieldmodule
        if self._fieldmodule:
            self.ui.field_editor.setFieldmodule(self._fieldmodule)
        self._buildFieldsList()
        # Subscribe to field changes so the list stays in sync.
        if self._fieldmodule:
            self._fieldmodulenotifier = self._fieldmodule.createFieldmodulenotifier()
            self._fieldmodulenotifier.setCallback(self._fieldmoduleCallback)
        else:
            self._fieldmodulenotifier = None

    def setNeonRegion(self, neonRegion):
        """Set the neon region that tracks field types for this editor."""
        self._neonRegion = neonRegion

    def listItemEdited(self, item):
        """Handle in-place rename of a field in the list view."""
        field = item.data()
        if field and field.isValid():
            newName = item.text()
            oldName = field.getName()
            if newName != oldName:
                # Revert the displayed text if Zinc rejects the rename.
                if field.setName(newName) != ZINC_OK:
                    item.setText(field.getName())
                self._neonRegion.replaceFieldTypeKey(oldName, newName)

    def _buildFieldsList(self):
        """Fill the field list view with the fields of the current fieldmodule."""
        if self._fieldItems is not None:
            self._fieldItems.clear()  # Must clear or holds on to field references
        self._fieldItems = QtGui.QStandardItemModel(self.ui.field_listview)
        selectedIndex = None
        if self._fieldmodule:
            selectedField = self.ui.field_editor.getField()
            fieldIterator = self._fieldmodule.createFielditerator()
            field = fieldIterator.next()
            while field and field.isValid():
                name = field.getName()
                item = QtGui.QStandardItem(name)
                item.setData(field)
                item.setCheckable(False)
                item.setEditable(True)
                self._fieldItems.appendRow(item)
                if selectedField and field == selectedField:
                    selectedIndex = self._fieldItems.indexFromItem(item)
                field = fieldIterator.next()
        self.ui.field_listview.setModel(self._fieldItems)
        self._fieldItems.itemChanged.connect(self.listItemEdited)
        if selectedIndex:
            self.ui.field_listview.setCurrentIndex(selectedIndex)
        self.ui.field_listview.show()

    def _displayField(self):
        """Show the currently selected field in the list view and editor."""
        if self._field and self._field.isValid():
            selectedIndex = None
            i = 0
            # Walk the model items until None, which means we have passed
            # the end of the list.
            while self._fieldItems.item(i):
                field = self._fieldItems.item(i).data()
                if self._field == field:
                    selectedIndex = self._fieldItems.indexFromItem(self._fieldItems.item(i))
                    break
                i += 1
            if selectedIndex:
                self.ui.field_listview.setCurrentIndex(selectedIndex)
            name = self._field.getName()
            fieldTypeDict = self._neonRegion.getFieldTypeDict()
            if name in fieldTypeDict:
                self.ui.field_editor.setField(self._field, fieldTypeDict[name])
            else:
                self.ui.field_editor.setField(self._field, None)
        else:
            # BUG FIX: was "self.field_listview" (missing ".ui."), which
            # raised AttributeError whenever the selection was cleared.
            self.ui.field_listview.clearSelection()
            self.ui.field_editor.setField(None, None)

    def fieldListItemClicked(self, modelIndex):
        """Handle a click on an item in the field list view."""
        model = modelIndex.model()
        item = model.item(modelIndex.row())
        self._field = item.data()
        self._displayField()

    def setField(self, field):
        """Set the currently selected field (None or invalid clears it)."""
        if not field or not field.isValid():
            self._field = None
        else:
            self._field = field
        self._displayField()

    def addFieldClicked(self):
        """Switch the field editor into field-creation mode."""
        self.ui.field_editor.enterCreateMode()
"""Test helpers for Panasonic Viera."""
from unittest.mock import Mock, patch
from panasonic_viera import TV_TYPE_ENCRYPTED, TV_TYPE_NONENCRYPTED
import pytest
from homeassistant.components.panasonic_viera.const import (
ATTR_FRIENDLY_NAME,
ATTR_MANUFACTURER,
ATTR_MODEL_NUMBER,
ATTR_UDN,
CONF_APP_ID,
CONF_ENCRYPTION_KEY,
CONF_ON_ACTION,
DEFAULT_MANUFACTURER,
DEFAULT_MODEL_NUMBER,
DEFAULT_NAME,
DEFAULT_PORT,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
# Minimal user-supplied data: just the host plus the default entry name.
MOCK_BASIC_DATA = {
    CONF_HOST: "0.0.0.0",
    CONF_NAME: DEFAULT_NAME,
}
# Full (unencrypted) config entry data built on top of the basic data.
MOCK_CONFIG_DATA = {
    **MOCK_BASIC_DATA,
    CONF_PORT: DEFAULT_PORT,
    CONF_ON_ACTION: None,
}
# Extra entry data present only for encrypted TVs.
MOCK_ENCRYPTION_DATA = {
    CONF_APP_ID: "mock-app-id",
    CONF_ENCRYPTION_KEY: "mock-encryption-key",
}
# Device info as reported by the (mocked) TV.
MOCK_DEVICE_INFO = {
    ATTR_FRIENDLY_NAME: DEFAULT_NAME,
    ATTR_MANUFACTURER: DEFAULT_MANUFACTURER,
    ATTR_MODEL_NUMBER: DEFAULT_MODEL_NUMBER,
    ATTR_UDN: "mock-unique-id",
}
def get_mock_remote(
    request_error=None,
    authorize_error=None,
    encrypted=False,
    app_id=None,
    encryption_key=None,
    device_info=MOCK_DEVICE_INFO,
):
    """Build and return a mock of the panasonic_viera RemoteControl.

    request_error/authorize_error, when given, are exceptions raised by
    the corresponding pairing calls (the PIN "1234" always authorizes).
    """
    remote = Mock()
    if encrypted:
        remote.type = TV_TYPE_ENCRYPTED
    else:
        remote.type = TV_TYPE_NONENCRYPTED
    remote.app_id = app_id
    remote.enc_key = encryption_key

    def request_pin_code(name=None):
        # Simulate a failing pairing request when configured to do so.
        if request_error is not None:
            raise request_error

    def authorize_pin_code(pincode):
        # The magic PIN always succeeds; otherwise raise if configured.
        if pincode != "1234" and authorize_error is not None:
            raise authorize_error

    def get_device_info():
        return device_info

    def send_key(key):
        return

    def get_volume(key):
        return 100

    remote.request_pin_code = request_pin_code
    remote.authorize_pin_code = authorize_pin_code
    remote.get_device_info = get_device_info
    remote.send_key = Mock(send_key)
    remote.get_volume = Mock(get_volume)
    return remote
@pytest.fixture(name="mock_remote")
def mock_remote_fixture():
"""Patch the library remote."""
mock_remote = get_mock_remote()
with patch(
"homeassistant.components.panasonic_viera.RemoteControl",
return_value=mock_remote,
):
yield mock_remote | unknown | codeparrot/codeparrot-clean | ||
"""
Mazhalai
-------
Custom code interpreter.
:copyright: (c) 2017 by Gokul Sridhar.
:license: MIT
"""
import pprint
_pp = pprint.PrettyPrinter(indent=2)
def _print_node(node, indent, indent_symbol):
if isinstance(node, list):
for child in node:
for p in _print_node(child, indent, indent_symbol):
yield p
elif isinstance(node, int) or isinstance(node, float) or isinstance(node, str) or node is None:
yield ' {}'.format(node)
elif hasattr(node, '_fields'):
yield '\n{}{}'.format(indent_symbol * indent, type(node).__name__)
for field in node._fields:
yield '\n{}{}:'.format(indent_symbol * (indent + 1), field, ':')
for p in _print_node(getattr(node, field), indent + 2, indent_symbol):
yield p
else:
yield '\nError! Unable to print {}'.format(node)
def print_ast(node, indent=0, indent_symbol=' ' * 4):
print(''.join(_print_node(node, indent, indent_symbol)))
def print_tokens(tokens):
_pp.pprint(tokens)
def print_env(env):
_pp.pprint(env.asdict()) | unknown | codeparrot/codeparrot-clean | ||
# pylint: disable=missing-docstring
import ddt
from unittest import TestCase
from track import contexts
@ddt.ddt
class TestContexts(TestCase):
COURSE_ID = 'test/course_name/course_run'
SPLIT_COURSE_ID = 'course-v1:test+course_name+course_run'
ORG_ID = 'test'
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_from_url(self, course_id, postfix):
url = 'http://foo.bar.com/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def assert_parses_course_id_from_url(self, format_string, course_id):
self.assertEquals(
contexts.course_context_from_url(format_string.format(course_id=course_id)),
{
'course_id': course_id,
'org_id': self.ORG_ID
}
)
def test_no_course_id_in_url(self):
self.assert_empty_context_for_url('http://foo.bar.com/dashboard')
def assert_empty_context_for_url(self, url):
self.assertEquals(
contexts.course_context_from_url(url),
{
'course_id': '',
'org_id': ''
}
)
@ddt.data('', '/', '/?', '?format=json')
def test_malformed_course_id(self, postfix):
self.assert_empty_context_for_url('http://foo.bar.com/courses/test/course_name{}'.format(postfix))
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_later_in_url(self, course_id, postfix):
url = 'http://foo.bar.com/x/y/z/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def test_no_url(self):
self.assert_empty_context_for_url(None) | unknown | codeparrot/codeparrot-clean | ||
/* contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION jsonb_plpython3u" to load this file. \quit
CREATE FUNCTION jsonb_to_plpython3(val internal) RETURNS internal
LANGUAGE C STRICT IMMUTABLE
AS 'MODULE_PATHNAME', 'jsonb_to_plpython';
CREATE FUNCTION plpython3_to_jsonb(val internal) RETURNS jsonb
LANGUAGE C STRICT IMMUTABLE
AS 'MODULE_PATHNAME', 'plpython_to_jsonb';
CREATE TRANSFORM FOR jsonb LANGUAGE plpython3u (
FROM SQL WITH FUNCTION jsonb_to_plpython3(internal),
TO SQL WITH FUNCTION plpython3_to_jsonb(internal)
);
COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpython3u IS 'transform between jsonb and Python'; | sql | github | https://github.com/postgres/postgres | contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql |
# frozen_string_literal: true
class BookIdentifier < ActiveRecord::Base
belongs_to :book
end | ruby | github | https://github.com/rails/rails | activerecord/test/models/book_identifier.rb |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for unarchive operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
creates = self._task.args.get('creates', None)
decrypt = self._task.args.get('decrypt', True)
try:
# "copy" is deprecated in favor of "remote_src".
if 'copy' in self._task.args:
# They are mutually exclusive.
if 'remote_src' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
# We will take the information from copy and store it in
# the remote_src var to use later in this file.
self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)
if source is None or dest is None:
raise AnsibleActionFail("src (or content) and dest are required")
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
creates = self._remote_expand_user(creates)
if self._remote_file_exists(creates):
raise AnsibleActionSkip("skipped, since %s exists" % creates)
dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
source = os.path.expanduser(source)
if not remote_src:
try:
source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
try:
remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
if not remote_stat['exists'] or not remote_stat['isdir']:
raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
if not remote_src:
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
self._transfer_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
if not remote_src:
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
original_basename=os.path.basename(source),
),
)
else:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
original_basename=os.path.basename(source),
),
)
# remove action plugin only key
for key in ('decrypt',):
if key in new_module_args:
del new_module_args[key]
# execute the unarchive module now, with the updated args
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue_sheet | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_LIB_MONITORING_CELL_READER_INL_H_
#define TENSORFLOW_CORE_LIB_MONITORING_CELL_READER_INL_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/lib/monitoring/cell_reader-inl.h"
#include "tensorflow/core/lib/monitoring/collected_metrics.h"
#include "tensorflow/core/lib/monitoring/metric_def.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
// NOLINTBEGIN(misc-unused-using-decls)
namespace tensorflow {
namespace monitoring {
namespace testing {
namespace internal {
using tsl::monitoring::testing::internal::CollectMetrics;
using tsl::monitoring::testing::internal::GetDelta;
using tsl::monitoring::testing::internal::GetLatestPoint;
using tsl::monitoring::testing::internal::GetLatestValueOrDefault;
using tsl::monitoring::testing::internal::GetMetricKind;
using tsl::monitoring::testing::internal::GetPoints;
using tsl::monitoring::testing::internal::GetValue;
} // namespace internal
} // namespace testing
} // namespace monitoring
} // namespace tensorflow
// NOLINTEND(misc-unused-using-decls)
#endif // TENSORFLOW_CORE_LIB_MONITORING_CELL_READER_INL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/lib/monitoring/cell_reader-inl.h |
# Frames
Each call to a Python function has an activation record, commonly known as a
"frame". It contains information about the function being executed, consisting
of three conceptual sections:
* Local variables (including arguments, cells and free variables)
* Evaluation stack
* Specials: The per-frame object references needed by the VM, including
globals dict, code object, instruction pointer, stack depth, the
previous frame, etc.
The definition of the `_PyInterpreterFrame` struct is in
[Include/internal/pycore_interpframe_structs.h](../Include/internal/pycore_interpframe_structs.h).
# Allocation
Python semantics allows frames to outlive the activation, so they need to
be allocated outside the C call stack. To reduce overhead and improve locality
of reference, most frames are allocated contiguously in a per-thread stack
(see `_PyThreadState_PushFrame` in [Python/pystate.c](../Python/pystate.c)).
Frames of generators and coroutines are embedded in the generator and coroutine
objects, so are not allocated in the per-thread stack. See `_PyGenObject` in
[Include/internal/pycore_interpframe_structs.h](../Include/internal/pycore_interpframe_structs.h).
## Layout
Each activation record is laid out as:
* Specials
* Locals
* Stack
This seems to provide the best performance without excessive complexity.
The specials have a fixed size, so the offset of the locals is known. The
interpreter needs to hold two pointers, a frame pointer and a stack pointer.
#### Alternative layout
An alternative layout that was used for part of 3.11 alpha was:
* Locals
* Specials
* Stack
This has the advantage that no copying is required when making a call,
as the arguments on the stack are (usually) already in the correct
location for the parameters. However, it requires the VM to maintain
an extra pointer for the locals, which can hurt performance.
### Specials
The specials section contains the following pointers:
* Globals dict
* Builtins dict
* Locals dict (not the "fast" locals, but the locals for eval and class creation)
* Code object
* Heap allocated `PyFrameObject` for this activation record, if any.
* The function.
The pointer to the function is not strictly required, but it is cheaper to
store a strong reference to the function and borrowed references to the globals
and builtins, than strong references to both globals and builtins.
### Frame objects
When creating a backtrace or when calling `sys._getframe()` the frame becomes
visible to Python code. When this happens a new `PyFrameObject` is created
and a strong reference to it is placed in the `frame_obj` field of the specials
section. The `frame_obj` field is initially `NULL`.
The `PyFrameObject` may outlive a stack-allocated `_PyInterpreterFrame`.
If it does then `_PyInterpreterFrame` is copied into the `PyFrameObject`,
except the evaluation stack which must be empty at this point.
The previous frame link is updated to reflect the new location of the frame.
This mechanism provides the appearance of persistent, heap-allocated
frames for each activation, but with low runtime overhead.
### Generators and Coroutines
Generators (objects of type `PyGen_Type`, `PyCoro_Type` or
`PyAsyncGen_Type`) have a `_PyInterpreterFrame` embedded in them, so
that they can be created with a single memory allocation.
When such an embedded frame is iterated or awaited, it can be linked with
frames on the per-thread stack via the linkage fields.
If a frame object associated with a generator outlives the generator, then
the embedded `_PyInterpreterFrame` is copied into the frame object (see
`take_ownership()` in [Python/frame.c](../Python/frame.c)).
### Field names
Many of the fields in `_PyInterpreterFrame` were copied from the 3.10 `PyFrameObject`.
Thus, some of the field names may be a bit misleading.
For example the `f_globals` field has a `f_` prefix implying it belongs to the
`PyFrameObject` struct, although it belongs to the `_PyInterpreterFrame` struct.
We may rationalize this naming scheme for a later version.
### Shim frames
On entry to `_PyEval_EvalFrameDefault()` a shim `_PyInterpreterFrame` is pushed.
This frame is stored on the C stack, and popped when `_PyEval_EvalFrameDefault()`
returns. This extra frame is inserted so that `RETURN_VALUE`, `YIELD_VALUE`, and
`RETURN_GENERATOR` do not need to check whether the current frame is the entry frame.
The shim frame points to a special code object containing the `INTERPRETER_EXIT`
instruction which cleans up the shim frame and returns.
### Base frame
Each thread state contains an embedded `_PyInterpreterFrame` called the "base frame"
that serves as a sentinel at the bottom of the frame stack. This frame is allocated
in `_PyThreadStateImpl` (the internal extension of `PyThreadState`) and initialized
when the thread state is created. The `owner` field is set to `FRAME_OWNED_BY_INTERPRETER`.
External profilers and sampling tools can validate that they have successfully unwound
the complete call stack by checking that the frame chain terminates at the base frame.
The `PyThreadState.base_frame` pointer provides the expected address to compare against.
If a stack walk doesn't reach this frame, the sample is incomplete (possibly due to a
race condition) and should be discarded.
The base frame is embedded in `_PyThreadStateImpl` rather than `PyThreadState` because
`_PyInterpreterFrame` is defined in internal headers that cannot be exposed in the
public API. A pointer (`PyThreadState.base_frame`) is provided for profilers to access
the address without needing internal headers.
See the initialization in `new_threadstate()` in [Python/pystate.c](../Python/pystate.c).
#### How profilers should use the base frame
External profilers should read `tstate->base_frame` before walking the stack, then
walk from `tstate->current_frame` following `frame->previous` pointers until reaching
a frame with `owner == FRAME_OWNED_BY_INTERPRETER`. After the walk, verify that the
last frame address matches `base_frame`. If not, discard the sample as incomplete
since the frame chain may have been in an inconsistent state due to concurrent updates.
### Remote Profiling Frame Cache
The `last_profiled_frame` field in `PyThreadState` supports an optimization for
remote profilers that sample call stacks from external processes. When a remote
profiler reads the call stack, it writes the current frame address to this field.
The eval loop then keeps this pointer valid by updating it to the parent frame
whenever a frame returns (in `_PyEval_FrameClearAndPop`).
This creates a "high-water mark" that always points to a frame still on the stack.
On subsequent samples, the profiler can walk from `current_frame` until it reaches
`last_profiled_frame`, knowing that frames from that point downward are unchanged
and can be retrieved from a cache. This significantly reduces the amount of remote
memory reads needed when call stacks are deep and stable at their base.
The update in `_PyEval_FrameClearAndPop` is guarded: it only writes when
`last_profiled_frame` is non-NULL AND matches the frame being popped. This
prevents transient frames (called and returned between profiler samples) from
corrupting the cache pointer, while avoiding any overhead when profiling is inactive.
### The Instruction Pointer
`_PyInterpreterFrame` has two fields which are used to maintain the instruction
pointer: `instr_ptr` and `return_offset`.
When a frame is executing, `instr_ptr` points to the instruction currently being
executed. In a suspended frame, it points to the instruction that would execute
if the frame were to resume. After `frame.f_lineno` is set, `instr_ptr` points to
the next instruction to be executed. During a call to a python function,
`instr_ptr` points to the call instruction, because this is what we would expect
to see in an exception traceback.
The `return_offset` field determines where a `RETURN` should go in the caller,
relative to `instr_ptr`. It is only meaningful to the callee, so it needs to
be set in any instruction that implements a call (to a Python function),
including CALL, SEND and BINARY_OP_SUBSCR_GETITEM, among others. If there is no
callee, then return_offset is meaningless. It is necessary to have a separate
field for the return offset because (1) if we apply this offset to `instr_ptr`
while executing the `RETURN`, this is too early and would lose us information
about the previous instruction which we could need for introspecting and
debugging. (2) `SEND` needs to pass two offsets to the generator: one for
`RETURN` and one for `YIELD`. It uses the `oparg` for one, and the
`return_offset` for the other. | unknown | github | https://github.com/python/cpython | InternalDocs/frames.md |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.clients.Metadata.LeaderAndEpoch;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.GroupProtocol;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.consumer.SubscriptionPattern;
import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent;
import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler;
import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent;
import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent;
import org.apache.kafka.clients.consumer.internals.events.AsyncPollEvent;
import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent;
import org.apache.kafka.clients.consumer.internals.events.CommitEvent;
import org.apache.kafka.clients.consumer.internals.events.CommitOnCloseEvent;
import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent;
import org.apache.kafka.clients.consumer.internals.events.CompletableBackgroundEvent;
import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper;
import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackNeededEvent;
import org.apache.kafka.clients.consumer.internals.events.ErrorEvent;
import org.apache.kafka.clients.consumer.internals.events.EventProcessor;
import org.apache.kafka.clients.consumer.internals.events.FetchCommittedOffsetsEvent;
import org.apache.kafka.clients.consumer.internals.events.LeaveGroupOnCloseEvent;
import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent;
import org.apache.kafka.clients.consumer.internals.events.ResetOffsetEvent;
import org.apache.kafka.clients.consumer.internals.events.SeekUnvalidatedEvent;
import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent;
import org.apache.kafka.clients.consumer.internals.events.TopicPatternSubscriptionChangeEvent;
import org.apache.kafka.clients.consumer.internals.events.TopicRe2JPatternSubscriptionChangeEvent;
import org.apache.kafka.clients.consumer.internals.events.TopicSubscriptionChangeEvent;
import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent;
import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.errors.GroupAuthorizationException;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.InvalidGroupIdException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ConsumerGroupHeartbeatResponse;
import org.apache.kafka.common.requests.FindCoordinatorResponse;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.ListOffsetsRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestTestUtils;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.LogCaptureAppender;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.test.MockConsumerInterceptor;
import org.apache.kafka.test.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.function.Executable;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatchers;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import static java.util.Arrays.asList;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR;
import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_ASSIGNED;
import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_LOST;
import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_REVOKED;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED;
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;
import static org.apache.kafka.test.TestUtils.requiredConsumerConfig;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@SuppressWarnings("unchecked")
public class AsyncKafkaConsumerTest {
    // Consumer under test; created lazily by each test and torn down in resetAll().
    private AsyncKafkaConsumer<String, String> consumer = null;
    // Deterministic clock shared between the test and the consumer.
    private Time time = new MockTime(0);
    private final Metrics metrics = new Metrics();
    // Mocks standing in for the consumer's internal collaborators, injected via the test constructors below.
    private final FetchCollector<String, String> fetchCollector = mock(FetchCollector.class);
    private final ApplicationEventHandler applicationEventHandler = mock(ApplicationEventHandler.class);
    private final ConsumerMetadata metadata = mock(ConsumerMetadata.class);
    // Queue through which background-thread events reach the consumer; drained between tests.
    private final LinkedBlockingQueue<BackgroundEvent> backgroundEventQueue = new LinkedBlockingQueue<>();
    private final CompletableEventReaper backgroundEventReaper = mock(CompletableEventReaper.class);
@AfterEach
public void resetAll() {
backgroundEventQueue.clear();
if (consumer != null) {
try {
consumer.close(CloseOptions.timeout(Duration.ZERO));
} catch (Exception swallow) {
// best effort to clean up after each test, but may throw (ex. if callbacks were
// throwing errors)
}
}
consumer = null;
Mockito.framework().clearInlineMocks();
MockConsumerInterceptor.resetCounters();
}
private AsyncKafkaConsumer<String, String> newConsumer() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id");
return newConsumer(props);
}
private AsyncKafkaConsumer<String, String> newConsumerWithoutGroupId() {
final Properties props = requiredConsumerConfig();
return newConsumer(props);
}
    /** Builds a consumer from {@code props} with no Streams rebalance data attached. */
    private AsyncKafkaConsumer<String, String> newConsumer(Properties props) {
        return newConsumerWithStreamRebalanceData(props, null);
    }
private AsyncKafkaConsumer<String, String> newConsumerWithStreamRebalanceData(
Properties props,
StreamsRebalanceData streamsRebalanceData
) {
// disable auto-commit by default, so we don't need to handle SyncCommitEvent for each case
if (!props.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
}
final ConsumerConfig config = new ConsumerConfig(props);
return new AsyncKafkaConsumer<>(
config,
new StringDeserializer(),
new StringDeserializer(),
time,
(logContext, time, initializationTimeoutMs, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler,
logContext -> backgroundEventReaper,
(logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector,
(consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata,
backgroundEventQueue,
Optional.ofNullable(streamsRebalanceData)
);
}
    // Same mock wiring as newConsumerWithStreamRebalanceData, but from a pre-built ConsumerConfig
    // and always without Streams rebalance data.
    private AsyncKafkaConsumer<String, String> newConsumer(ConsumerConfig config) {
        return new AsyncKafkaConsumer<>(
            config,
            new StringDeserializer(),
            new StringDeserializer(),
            time,
            (logContext, time, initializationTimeoutMs, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler,
            logContext -> backgroundEventReaper,
            (logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector,
            (consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata,
            backgroundEventQueue,
            Optional.empty()
        );
    }
    /**
     * Builds a consumer through the fully-explicit test constructor, letting individual tests
     * substitute their own fetch buffer, interceptors, rebalance-listener invoker and
     * subscription state while keeping the shared mocks for everything else.
     */
    private AsyncKafkaConsumer<String, String> newConsumer(
        FetchBuffer fetchBuffer,
        ConsumerInterceptors<String, String> interceptors,
        ConsumerRebalanceListenerInvoker rebalanceListenerInvoker,
        SubscriptionState subscriptions) {
        // Fixed timing knobs; tests that care about time use MockTime instead.
        long retryBackoffMs = 100L;
        int requestTimeoutMs = 30000;
        int defaultApiTimeoutMs = 1000;
        return new AsyncKafkaConsumer<>(
            new LogContext(),
            "client-id",
            new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics),
            fetchBuffer,
            fetchCollector,
            interceptors,
            time,
            applicationEventHandler,
            backgroundEventQueue,
            backgroundEventReaper,
            rebalanceListenerInvoker,
            metrics,
            subscriptions,
            metadata,
            retryBackoffMs,
            requestTimeoutMs,
            defaultApiTimeoutMs,
            "group-id",
            false,
            new PositionsValidator(new LogContext(), time, subscriptions, metadata));
    }
    // A freshly created consumer must close cleanly once its unsubscribe event completes.
    @Test
    public void testSuccessfulStartupShutdown() {
        consumer = newConsumer();
        completeUnsubscribeApplicationEventSuccessfully();
        assertDoesNotThrow(() -> consumer.close());
    }
    // API calls after close() must fail with IllegalStateException and the documented message.
    @Test
    public void testFailOnClosedConsumer() {
        consumer = newConsumer();
        completeUnsubscribeApplicationEventSuccessfully();
        consumer.close();
        final IllegalStateException res = assertThrows(IllegalStateException.class, consumer::assignment);
        assertEquals("This consumer has already been closed.", res.getMessage());
    }
    // commitAsync with a null callback must still enqueue an AsyncCommitEvent carrying the
    // supplied offsets, and must not throw.
    @Test
    public void testCommitAsyncWithNullCallback() {
        consumer = newConsumer();
        final TopicPartition t0 = new TopicPartition("t0", 2);
        final TopicPartition t1 = new TopicPartition("t0", 3);
        HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(t0, new OffsetAndMetadata(10L));
        offsets.put(t1, new OffsetAndMetadata(20L));
        markOffsetsReadyForCommitEvent();
        consumer.commitAsync(offsets, null);
        final ArgumentCaptor<AsyncCommitEvent> commitEventCaptor = ArgumentCaptor.forClass(AsyncCommitEvent.class);
        verify(applicationEventHandler).add(commitEventCaptor.capture());
        final AsyncCommitEvent commitEvent = commitEventCaptor.getValue();
        assertTrue(commitEvent.offsets().isPresent());
        assertEquals(offsets, commitEvent.offsets().get());
        // Complete the first commit so the second commitAsync is not blocked by it.
        commitEvent.future().complete(offsets);
        assertDoesNotThrow(() -> consumer.commitAsync(offsets, null));
        // Clean-up. Close the consumer here as we know it will cause a TimeoutException to be thrown.
        // If we get an error *other* than the TimeoutException, we'll fail the test.
        try {
            Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO)));
            assertInstanceOf(TimeoutException.class, e.getCause());
        } finally {
            consumer = null;
        }
    }
    // A user-supplied commit callback is invoked exactly once with no exception when the
    // async commit completes successfully.
    @Test
    public void testCommitAsyncUserSuppliedCallbackNoException() {
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(new TopicPartition("my-topic", 1), new OffsetAndMetadata(200L));
        completeCommitAsyncApplicationEventSuccessfully();
        MockCommitCallback callback = new MockCommitCallback();
        assertDoesNotThrow(() -> consumer.commitAsync(offsets, callback));
        forceCommitCallbackInvocation();
        assertEquals(1, callback.invoked);
        assertNull(callback.exception);
    }
    // When the async commit fails, the user callback receives an exception of the same type
    // as the failure injected into the event (cases from commitExceptionSupplier).
    @ParameterizedTest
    @MethodSource("commitExceptionSupplier")
    public void testCommitAsyncUserSuppliedCallbackWithException(Exception exception) {
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(new TopicPartition("my-topic", 1), new OffsetAndMetadata(200L));
        completeCommitAsyncApplicationEventExceptionally(exception);
        MockCommitCallback callback = new MockCommitCallback();
        assertDoesNotThrow(() -> consumer.commitAsync(offsets, callback));
        forceCommitCallbackInvocation();
        assertSame(exception.getClass(), callback.exception.getClass());
    }
    // The offsets map passed to commitAsync must be defensively copied: mutating the caller's
    // map after the call must not affect the enqueued commit event.
    @Test
    public void testCommitAsyncShouldCopyOffsets() {
        consumer = newConsumer();
        TopicPartition tp = new TopicPartition("t0", 2);
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(tp, new OffsetAndMetadata(10L));
        markOffsetsReadyForCommitEvent();
        consumer.commitAsync(offsets, null);
        final ArgumentCaptor<AsyncCommitEvent> commitEventCaptor = ArgumentCaptor.forClass(AsyncCommitEvent.class);
        verify(applicationEventHandler).add(commitEventCaptor.capture());
        final AsyncCommitEvent commitEvent = commitEventCaptor.getValue();
        assertTrue(commitEvent.offsets().isPresent());
        assertTrue(commitEvent.offsets().get().containsKey(tp));
        // Removing from the caller's map must not remove it from the event's copy.
        offsets.remove(tp);
        assertTrue(commitEvent.offsets().get().containsKey(tp));
    }
private static Stream<Exception> commitExceptionSupplier() {
return Stream.of(
new KafkaException("Test exception"),
new GroupAuthorizationException("Group authorization exception"));
}
    // committed() must return the offsets fetched via a FetchCommittedOffsetsEvent and record
    // time spent in the "committed-time-ns-total" metric (MockTime(1) advances the clock).
    @Test
    public void testCommitted() {
        time = new MockTime(1);
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsets = mockTopicPartitionOffset();
        completeFetchedCommittedOffsetApplicationEventSuccessfully(topicPartitionOffsets);
        assertEquals(topicPartitionOffsets, consumer.committed(topicPartitionOffsets.keySet(), Duration.ofMillis(1000)));
        verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(FetchCommittedOffsetsEvent.class));
        final Metric metric = consumer.metrics()
            .get(consumer.metricsRegistry().metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP));
        assertTrue((double) metric.metricValue() > 0);
    }
    // A failure while processing the FetchCommittedOffsetsEvent must surface from committed()
    // as a KafkaException.
    @Test
    public void testCommittedExceptionThrown() {
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndMetadata> offsets = mockTopicPartitionOffset();
        when(applicationEventHandler.addAndGet(
            any(FetchCommittedOffsetsEvent.class))).thenAnswer(invocation -> {
                CompletableApplicationEvent<?> event = invocation.getArgument(0);
                assertInstanceOf(FetchCommittedOffsetsEvent.class, event);
                throw new KafkaException("Test exception");
            });
        assertThrows(KafkaException.class, () -> consumer.committed(offsets.keySet(), Duration.ofMillis(1000)));
    }
    // A wakeup() issued before poll() makes the next poll throw WakeupException; the wakeup
    // is then consumed, so the subsequent poll succeeds.
    @Test
    public void testWakeupBeforeCallingPoll() {
        consumer = newConsumer();
        final String topicName = "foo";
        final int partition = 3;
        final TopicPartition tp = new TopicPartition(topicName, partition);
        doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(singleton(tp));
        consumer.wakeup();
        completeAsyncPollEventSuccessfully();
        assertThrows(WakeupException.class, () -> consumer.poll(Duration.ZERO));
        assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
    }
    // A wakeup() arriving while poll() is waiting on an empty fetch interrupts that poll with
    // WakeupException; the following poll proceeds normally.
    @Test
    public void testWakeupAfterEmptyFetch() {
        consumer = newConsumer();
        final String topicName = "foo";
        final int partition = 3;
        final TopicPartition tp = new TopicPartition(topicName, partition);
        // First collectFetch triggers the wakeup mid-poll, later calls just return empty.
        doAnswer(invocation -> {
            consumer.wakeup();
            return Fetch.empty();
        }).doAnswer(invocation -> Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(singleton(tp));
        completeAsyncPollEventSuccessfully();
        assertThrows(WakeupException.class, () -> consumer.poll(Duration.ofMinutes(1)));
        assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
    }
    // A wakeup() arriving while poll() already has records to return is deferred: that poll
    // returns the records, and the wakeup fires on the next poll instead.
    @Test
    public void testWakeupAfterNonEmptyFetch() {
        consumer = newConsumer();
        final String topicName = "foo";
        final int partition = 3;
        final TopicPartition tp = new TopicPartition(topicName, partition);
        final List<ConsumerRecord<String, String>> records = asList(
            new ConsumerRecord<>(topicName, partition, 2, "key1", "value1"),
            new ConsumerRecord<>(topicName, partition, 3, "key2", "value2")
        );
        doAnswer(invocation -> {
            consumer.wakeup();
            return Fetch.forPartition(tp, records, true, new OffsetAndMetadata(4, Optional.of(0), ""));
        }).when(fetchCollector).collectFetch(Mockito.any(FetchBuffer.class));
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(singleton(tp));
        completeAsyncPollEventSuccessfully();
        // since wakeup() is called when the non-empty fetch is returned the wakeup should be ignored
        assertDoesNotThrow(() -> consumer.poll(Duration.ofMinutes(1)));
        // the previously ignored wake-up should not be ignored in the next call
        assertThrows(WakeupException.class, () -> consumer.poll(Duration.ZERO));
    }
    // commitSync() must be usable from inside an onPartitionsRevoked rebalance callback that is
    // driven by a ConsumerRebalanceListenerCallbackNeededEvent processed during poll().
    @Test
    public void testCommitInRebalanceCallback() {
        consumer = newConsumer();
        final String topicName = "foo";
        final int partition = 3;
        final TopicPartition tp = new TopicPartition(topicName, partition);
        doAnswer(invocation -> Fetch.empty()).when(fetchCollector).collectFetch(Mockito.any(FetchBuffer.class));
        SortedSet<TopicPartition> sortedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR);
        sortedPartitions.add(tp);
        // Queue the "revoked" callback event so poll() invokes the listener below.
        CompletableBackgroundEvent<Void> e = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, sortedPartitions);
        backgroundEventQueue.add(e);
        completeCommitSyncApplicationEventSuccessfully();
        final AtomicBoolean callbackExecuted = new AtomicBoolean(false);
        ConsumerRebalanceListener listener = new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
                assertDoesNotThrow(() -> consumer.commitSync(mkMap(mkEntry(tp, new OffsetAndMetadata(0)))));
                callbackExecuted.set(true);
            }
            @Override
            public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
                // no-op
            }
        };
        completeTopicSubscriptionChangeEventSuccessfully();
        consumer.subscribe(Collections.singletonList(topicName), listener);
        completeAsyncPollEventSuccessfully();
        consumer.poll(Duration.ZERO);
        assertTrue(callbackExecuted.get());
    }
    // After a poll() that returns records, no stale wakeup trigger may remain: the next poll()
    // must not throw.
    @Test
    public void testClearWakeupTriggerAfterPoll() {
        consumer = newConsumer();
        final String topicName = "foo";
        final int partition = 3;
        final TopicPartition tp = new TopicPartition(topicName, partition);
        final List<ConsumerRecord<String, String>> records = asList(
            new ConsumerRecord<>(topicName, partition, 2, "key1", "value1"),
            new ConsumerRecord<>(topicName, partition, 3, "key2", "value2")
        );
        doReturn(Fetch.forPartition(tp, records, true, new OffsetAndMetadata(4, Optional.of(0), "")))
            .when(fetchCollector).collectFetch(any(FetchBuffer.class));
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(singleton(tp));
        completeAsyncPollEventSuccessfully();
        consumer.poll(Duration.ZERO);
        assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
    }
    // Commit callbacks must run on the application (caller) thread, not the background thread.
    @Test
    public void testEnsureCallbackExecutedByApplicationThread() {
        consumer = newConsumer();
        final String currentThread = Thread.currentThread().getName();
        MockCommitCallback callback = new MockCommitCallback();
        completeCommitAsyncApplicationEventSuccessfully();
        assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback));
        forceCommitCallbackInvocation();
        assertEquals(1, callback.invoked);
        assertEquals(currentThread, callback.completionThread);
    }
    // commitSync() flushes pending commitAsync callbacks; an exception thrown by such a callback
    // propagates out of commitSync().
    @Test
    public void testEnsureCommitSyncExecutedCommitAsyncCallbacks() {
        consumer = newConsumer();
        KafkaException callbackException = new KafkaException("Async commit callback failed");
        OffsetCommitCallback callback = (offsets, exception) -> {
            throw callbackException;
        };
        assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback));
        assertThrows(callbackException.getClass(), () -> consumer.commitSync());
    }
    // commitSync with empty offsets must still wait for an in-flight commitAsync: it times out
    // while the async commit is pending and succeeds once it has completed (even exceptionally).
    @Test
    public void testCommitSyncAwaitsCommitAsyncCompletionWithEmptyOffsets() {
        final TopicPartition tp = new TopicPartition("foo", 0);
        final CompletableFuture<Void> asyncCommitFuture = setUpConsumerWithIncompleteAsyncCommit(tp);
        // Commit async is not completed yet, so commit sync should wait for it to complete (time out)
        assertThrows(TimeoutException.class, () -> consumer.commitSync(Collections.emptyMap(), Duration.ofMillis(100)));
        // Complete exceptionally async commit event
        asyncCommitFuture.completeExceptionally(new KafkaException("Test exception"));
        // Commit async is completed, so commit sync completes immediately (since offsets are empty)
        assertDoesNotThrow(() -> consumer.commitSync(Collections.emptyMap(), Duration.ofMillis(100)));
    }
    // commitSync with real offsets waits for an in-flight commitAsync to finish successfully
    // before committing its own offsets.
    @Test
    public void testCommitSyncAwaitsCommitAsyncCompletionWithNonEmptyOffsets() {
        final TopicPartition tp = new TopicPartition("foo", 0);
        final CompletableFuture<Void> asyncCommitFuture = setUpConsumerWithIncompleteAsyncCommit(tp);
        // Mock to complete sync event
        completeCommitSyncApplicationEventSuccessfully();
        // Commit async is not completed yet, so commit sync should wait for it to complete (time out)
        assertThrows(TimeoutException.class, () -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
        // Complete async commit event
        asyncCommitFuture.complete(null);
        // Commit async is completed, so commit sync does not need to wait before committing its offsets
        assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
    }
    // An exceptionally-completed commitAsync is reported through its own callback; it must not
    // make a subsequent commitSync fail.
    @Test
    public void testCommitSyncAwaitsCommitAsyncButDoesNotFail() {
        final TopicPartition tp = new TopicPartition("foo", 0);
        final CompletableFuture<Void> asyncCommitFuture = setUpConsumerWithIncompleteAsyncCommit(tp);
        // Mock to complete sync event
        completeCommitSyncApplicationEventSuccessfully();
        // Commit async is not completed yet, so commit sync should wait for it to complete (time out)
        assertThrows(TimeoutException.class, () -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
        // Complete exceptionally async commit event
        asyncCommitFuture.completeExceptionally(new KafkaException("Test exception"));
        // Commit async is completed exceptionally, but this will be handled by commit callback - commit sync should not fail.
        assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
    }
    // The offsets map passed to commitSync must be defensively copied, mirroring
    // testCommitAsyncShouldCopyOffsets for the sync path.
    @Test
    public void testCommitSyncShouldCopyOffsets() {
        consumer = newConsumer();
        TopicPartition tp = new TopicPartition("t0", 2);
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(tp, new OffsetAndMetadata(10L));
        completeCommitSyncApplicationEventSuccessfully();
        consumer.commitSync(offsets);
        final ArgumentCaptor<SyncCommitEvent> commitEventCaptor = ArgumentCaptor.forClass(SyncCommitEvent.class);
        verify(applicationEventHandler).add(commitEventCaptor.capture());
        final SyncCommitEvent commitEvent = commitEventCaptor.getValue();
        assertTrue(commitEvent.offsets().isPresent());
        assertTrue(commitEvent.offsets().get().containsKey(tp));
        // Mutating the caller's map must not affect the event's copy.
        offsets.remove(tp);
        assertTrue(commitEvent.offsets().get().containsKey(tp));
    }
    /**
     * Creates a consumer, assigns/seeks {@code tp}, fires a commitAsync and returns that
     * commit event's still-incomplete future so the test can complete it on demand.
     */
    private CompletableFuture<Void> setUpConsumerWithIncompleteAsyncCommit(TopicPartition tp) {
        time = new MockTime(1);
        consumer = newConsumer();
        // Commit async (incomplete)
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(Collections.singleton(tp));
        completeSeekUnvalidatedEventSuccessfully();
        consumer.seek(tp, 20);
        markOffsetsReadyForCommitEvent();
        consumer.commitAsync();
        CompletableApplicationEvent<Void> event = getLastEnqueuedEvent();
        return event.future();
    }
    // ArgumentCaptor's type-matching does not work reliably with Java 8, so we cannot directly capture the AsyncCommitEvent
    // Instead, we capture the super-class CompletableApplicationEvent and fetch the last captured event.
    // Returns the most recent event passed to applicationEventHandler.add().
    private <T> CompletableApplicationEvent<T> getLastEnqueuedEvent() {
        final ArgumentCaptor<CompletableApplicationEvent<T>> eventArgumentCaptor = ArgumentCaptor.forClass(CompletableApplicationEvent.class);
        verify(applicationEventHandler, atLeast(1)).add(eventArgumentCaptor.capture());
        final List<CompletableApplicationEvent<T>> allValues = eventArgumentCaptor.getAllValues();
        return allValues.get(allValues.size() - 1);
    }
    // Same as getLastEnqueuedEvent, but for events passed to applicationEventHandler.addAndGet().
    private <T> CompletableApplicationEvent<T> addAndGetLastEnqueuedEvent() {
        final ArgumentCaptor<CompletableApplicationEvent<T>> eventArgumentCaptor = ArgumentCaptor.forClass(CompletableApplicationEvent.class);
        verify(applicationEventHandler, atLeast(1)).addAndGet(eventArgumentCaptor.capture());
        final List<CompletableApplicationEvent<T>> allValues = eventArgumentCaptor.getAllValues();
        return allValues.get(allValues.size() - 1);
    }
    // poll() must drive any pending commitAsync callbacks to completion.
    @Test
    public void testEnsurePollExecutedCommitAsyncCallbacks() {
        consumer = newConsumer();
        MockCommitCallback callback = new MockCommitCallback();
        completeCommitAsyncApplicationEventSuccessfully();
        doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(Collections.singleton(new TopicPartition("foo", 0)));
        assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback));
        completeAsyncPollEventSuccessfully();
        assertMockCommitCallbackInvoked(() -> consumer.poll(Duration.ZERO), callback);
    }
    // close() must also drive any pending commitAsync callbacks to completion.
    @Test
    public void testEnsureShutdownExecutedCommitAsyncCallbacks() {
        consumer = newConsumer();
        completeUnsubscribeApplicationEventSuccessfully();
        MockCommitCallback callback = new MockCommitCallback();
        completeCommitAsyncApplicationEventSuccessfully();
        assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback));
        assertMockCommitCallbackInvoked(() -> consumer.close(), callback);
    }
    // close() must emit a CommitOnCloseEvent and a blocking LeaveGroupOnCloseEvent.
    @Test
    public void testVerifyApplicationEventOnShutdown() {
        consumer = newConsumer();
        completeUnsubscribeApplicationEventSuccessfully();
        doReturn(null).when(applicationEventHandler).addAndGet(any());
        consumer.close();
        verify(applicationEventHandler).add(any(CommitOnCloseEvent.class));
        verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class));
    }
    // close() leaves the group regardless of the close timeout (zero and default both checked).
    @ParameterizedTest
    @ValueSource(longs = {0, ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS})
    public void testCloseLeavesGroup(long timeoutMs) {
        SubscriptionState subscriptions = mock(SubscriptionState.class);
        consumer = spy(newConsumer(
            mock(FetchBuffer.class),
            mock(ConsumerInterceptors.class),
            mock(ConsumerRebalanceListenerInvoker.class),
            subscriptions));
        consumer.close(CloseOptions.timeout(Duration.ofMillis(timeoutMs)));
        verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class));
    }
    // If rebalance listener failed to execute during close, we still send the leave group,
    // and proceed with closing the consumer. The listener error is surfaced as the cause of
    // the KafkaException thrown from close().
    @Test
    public void testCloseLeavesGroupDespiteOnPartitionsLostError() {
        Throwable rootError = new KafkaException("Intentional error");
        Set<TopicPartition> partitions = singleton(new TopicPartition("topic1", 0));
        SubscriptionState subscriptions = mock(SubscriptionState.class);
        when(subscriptions.assignedPartitions()).thenReturn(partitions);
        ConsumerRebalanceListenerInvoker invoker = mock(ConsumerRebalanceListenerInvoker.class);
        doAnswer(invocation -> rootError).when(invoker).invokePartitionsLost(any(SortedSet.class));
        consumer = spy(newConsumer(
            mock(FetchBuffer.class),
            new ConsumerInterceptors<>(Collections.emptyList(), metrics),
            invoker,
            subscriptions));
        consumer.setGroupAssignmentSnapshot(partitions);
        Throwable t = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO)));
        assertNotNull(t.getCause());
        assertEquals(rootError, t.getCause());
        verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class));
    }
    // close() must still attempt the leave-group even when the calling thread is interrupted
    // (addAndGet throws InterruptException); the interrupt flag is cleared afterwards.
    @ParameterizedTest
    @ValueSource(longs = {0, ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS})
    public void testCloseLeavesGroupDespiteInterrupt(long timeoutMs) {
        Set<TopicPartition> partitions = singleton(new TopicPartition("topic1", 0));
        SubscriptionState subscriptions = mock(SubscriptionState.class);
        when(subscriptions.assignedPartitions()).thenReturn(partitions);
        when(applicationEventHandler.addAndGet(any(CompletableApplicationEvent.class))).thenThrow(InterruptException.class);
        consumer = spy(newConsumer(
            mock(FetchBuffer.class),
            mock(ConsumerInterceptors.class),
            mock(ConsumerRebalanceListenerInvoker.class),
            subscriptions));
        Duration timeout = Duration.ofMillis(timeoutMs);
        try {
            assertThrows(InterruptException.class, () -> consumer.close(CloseOptions.timeout(timeout)));
        } finally {
            // Clear the thread's interrupt status so later tests are unaffected.
            Thread.interrupted();
        }
        verify(applicationEventHandler).add(any(CommitOnCloseEvent.class));
        verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class));
    }
    // commitSyncAllConsumed() must enqueue a SyncCommitEvent with EMPTY offsets, signalling
    // "commit all consumed positions" rather than an explicit offsets map.
    @Test
    public void testCommitSyncAllConsumed() {
        SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE);
        consumer = newConsumer(
            mock(FetchBuffer.class),
            mock(ConsumerInterceptors.class),
            mock(ConsumerRebalanceListenerInvoker.class),
            subscriptions);
        completeTopicSubscriptionChangeEventSuccessfully();
        consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class));
        subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0)));
        completeSeekUnvalidatedEventSuccessfully();
        subscriptions.seek(new TopicPartition("topic", 0), 100);
        markOffsetsReadyForCommitEvent();
        consumer.commitSyncAllConsumed(time.timer(100));
        ArgumentCaptor<SyncCommitEvent> eventCaptor = ArgumentCaptor.forClass(SyncCommitEvent.class);
        verify(applicationEventHandler).add(eventCaptor.capture());
        SyncCommitEvent capturedEvent = eventCaptor.getValue();
        assertFalse(capturedEvent.offsets().isPresent(), "Expected empty optional offsets");
    }
    // With auto-commit disabled (the helpers' default), close() must not enqueue any
    // SyncCommitEvent even though positions have advanced.
    @Test
    public void testAutoCommitSyncDisabled() {
        SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE);
        consumer = newConsumer(
            mock(FetchBuffer.class),
            mock(ConsumerInterceptors.class),
            mock(ConsumerRebalanceListenerInvoker.class),
            subscriptions);
        completeTopicSubscriptionChangeEventSuccessfully();
        consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class));
        subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0)));
        completeSeekUnvalidatedEventSuccessfully();
        subscriptions.seek(new TopicPartition("topic", 0), 100);
        completeUnsubscribeApplicationEventSuccessfully();
        consumer.close();
        verify(applicationEventHandler, never()).add(any(SyncCommitEvent.class));
    }
private void assertMockCommitCallbackInvoked(final Executable task, final MockCommitCallback callback) {
assertDoesNotThrow(task);
assertEquals(1, callback.invoked);
assertNull(callback.exception);
}
    // Recording commit callback: counts invocations and captures the completion exception
    // plus the name of the thread that ran onComplete (for thread-affinity assertions).
    private static class MockCommitCallback implements OffsetCommitCallback {
        public int invoked = 0;
        public Exception exception = null;
        public String completionThread;
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            invoked++;
            this.completionThread = Thread.currentThread().getName();
            this.exception = exception;
        }
    }
    // assign() updates the assignment (not subscription) and sends an AssignmentChangeEvent.
    @Test
    public void testAssign() {
        consumer = newConsumer();
        final TopicPartition tp = new TopicPartition("foo", 3);
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(singleton(tp));
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().contains(tp));
        verify(applicationEventHandler).addAndGet(any(AssignmentChangeEvent.class));
    }
    // assign(null) is rejected with IllegalArgumentException.
    @Test
    public void testAssignOnNullTopicPartition() {
        consumer = newConsumer();
        assertThrows(IllegalArgumentException.class, () -> consumer.assign(null));
    }
    // assign(empty) is treated as unsubscribe: both subscription and assignment end up empty.
    @Test
    public void testAssignOnEmptyTopicPartition() {
        consumer = newConsumer();
        completeUnsubscribeApplicationEventSuccessfully();
        consumer.assign(Collections.emptyList());
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().isEmpty());
    }
    // A partition with a null topic name is rejected.
    @Test
    public void testAssignOnNullTopicInPartition() {
        consumer = newConsumer();
        assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(null, 0))));
    }
    // A partition with a blank (whitespace-only) topic name is rejected.
    @Test
    public void testAssignOnEmptyTopicInPartition() {
        consumer = newConsumer();
        assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition("  ", 0))));
    }
    // beginningOffsets(null, ...) is rejected with NullPointerException.
    @Test
    public void testBeginningOffsetsFailsIfNullPartitions() {
        consumer = newConsumer();
        assertThrows(NullPointerException.class, () -> consumer.beginningOffsets(null,
            Duration.ofMillis(1)));
    }
    // beginningOffsets() delegates to a ListOffsetsEvent with a non-zero deadline and maps
    // each internal OffsetAndTimestamp result to its plain offset.
    @Test
    public void testBeginningOffsets() {
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndTimestampInternal> expectedOffsets = mockOffsetAndTimestamp();
        when(applicationEventHandler.addAndGet(any(ListOffsetsEvent.class))).thenAnswer(invocation -> {
            ListOffsetsEvent event = invocation.getArgument(0);
            Timer timer = time.timer(event.deadlineMs() - time.milliseconds());
            if (timer.remainingMs() == 0) {
                fail("Timer duration should not be zero.");
            }
            return expectedOffsets;
        });
        Map<TopicPartition, Long> result = assertDoesNotThrow(() -> consumer.beginningOffsets(expectedOffsets.keySet(), Duration.ofMillis(1)));
        expectedOffsets.forEach((key, value) -> {
            assertTrue(result.containsKey(key));
            assertEquals(value.offset(), result.get(key));
        });
        verify(applicationEventHandler).addAndGet(any(ListOffsetsEvent.class));
    }
    // An event-processing failure surfaces from beginningOffsets() as the same KafkaException.
    @Test
    public void testBeginningOffsetsThrowsKafkaExceptionForUnderlyingExecutionFailure() {
        consumer = newConsumer();
        Set<TopicPartition> partitions = mockTopicPartitionOffset().keySet();
        Throwable eventProcessingFailure = new KafkaException("Unexpected failure " +
            "processing List Offsets event");
        doThrow(eventProcessingFailure).when(applicationEventHandler).addAndGet(
            any(ListOffsetsEvent.class));
        Throwable consumerError = assertThrows(KafkaException.class,
            () -> consumer.beginningOffsets(partitions,
                Duration.ofMillis(1)));
        assertEquals(eventProcessingFailure, consumerError);
        verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
    }
    // A TimeoutException from event processing propagates out of beginningOffsets().
    @Test
    public void testBeginningOffsetsTimeoutOnEventProcessingTimeout() {
        consumer = newConsumer();
        doThrow(new TimeoutException()).when(applicationEventHandler).addAndGet(any());
        assertThrows(TimeoutException.class,
            () -> consumer.beginningOffsets(
                Collections.singletonList(new TopicPartition("t1", 0)),
                Duration.ofMillis(1)));
        verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
    }
    // offsetsForTimes(null, ...) is rejected with NullPointerException.
    @Test
    public void testOffsetsForTimesOnNullPartitions() {
        consumer = newConsumer();
        assertThrows(NullPointerException.class, () -> consumer.offsetsForTimes(null,
            Duration.ofMillis(1)));
    }
    // The sentinel timestamps (EARLIEST/LATEST/MAX) are negative and must be rejected as
    // target times for offsetsForTimes().
    @Test
    public void testOffsetsForTimesFailsOnNegativeTargetTimes() {
        consumer = newConsumer();
        assertThrows(IllegalArgumentException.class,
            () -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
                    "topic1", 1), ListOffsetsRequest.EARLIEST_TIMESTAMP),
                Duration.ofMillis(1)));
        assertThrows(IllegalArgumentException.class,
            () -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
                    "topic1", 1), ListOffsetsRequest.LATEST_TIMESTAMP),
                Duration.ofMillis(1)));
        assertThrows(IllegalArgumentException.class,
            () -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
                    "topic1", 1), ListOffsetsRequest.MAX_TIMESTAMP),
                Duration.ofMillis(1)));
    }
    // offsetsForTimes() maps internal results to public OffsetAndTimestamp values via a
    // ListOffsetsEvent round-trip.
    @Test
    public void testOffsetsForTimes() {
        consumer = newConsumer();
        Map<TopicPartition, OffsetAndTimestampInternal> expectedResult = mockOffsetAndTimestamp();
        Map<TopicPartition, Long> timestampToSearch = mockTimestampToSearch();
        doReturn(expectedResult).when(applicationEventHandler).addAndGet(any());
        Map<TopicPartition, OffsetAndTimestamp> result =
            assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ofMillis(1)));
        expectedResult.forEach((key, value) -> {
            OffsetAndTimestamp expected = value.buildOffsetAndTimestamp();
            assertEquals(expected, result.get(key));
        });
        verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
    }
    // A reaper-expired event surfaces from offsetsForTimes() as a TimeoutException with the
    // consumer's own timeout message.
    @Test
    public void testOffsetsForTimesTimeoutException() {
        consumer = newConsumer();
        long timeout = 100;
        doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
            .when(applicationEventHandler).addAndGet(any());
        Throwable t = assertThrows(
            TimeoutException.class,
            () -> consumer.offsetsForTimes(mockTimestampToSearch(), Duration.ofMillis(timeout)));
        assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
    }
    // Same timeout-message contract as offsetsForTimes, exercised via beginningOffsets().
    @Test
    public void testBeginningOffsetsTimeoutException() {
        consumer = newConsumer();
        long timeout = 100;
        doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
            .when(applicationEventHandler).addAndGet(any());
        Throwable t = assertThrows(
            TimeoutException.class,
            () -> consumer.beginningOffsets(Collections.singleton(new TopicPartition("topic", 5)),
                Duration.ofMillis(timeout)));
        assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
    }
    // Same timeout-message contract, exercised via endOffsets().
    @Test
    public void testEndOffsetsTimeoutException() {
        consumer = newConsumer();
        long timeout = 100;
        doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
            .when(applicationEventHandler).addAndGet(any());
        Throwable t = assertThrows(
            TimeoutException.class,
            () -> consumer.endOffsets(Collections.singleton(new TopicPartition("topic", 5)),
                Duration.ofMillis(timeout)));
        assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
    }
    // With a zero timeout, beginningOffsets does not block: it fire-and-forgets the
    // ListOffsetsEvent (add, not addAndGet) and returns an empty map, matching the classic
    // consumer's zero-timeout behaviour.
    @Test
    public void testBeginningOffsetsWithZeroTimeout() {
        consumer = newConsumer();
        TopicPartition tp = new TopicPartition("topic1", 0);
        Map<TopicPartition, Long> result =
            assertDoesNotThrow(() -> consumer.beginningOffsets(Collections.singletonList(tp), Duration.ZERO));
        assertNotNull(result);
        assertEquals(0, result.size());
        verify(applicationEventHandler).add(ArgumentMatchers.isA(ListOffsetsEvent.class));
    }
    // With a zero timeout, offsetsForTimes returns all requested partitions mapped to null
    // without ever blocking on (addAndGet) a ListOffsetsEvent.
    @Test
    public void testOffsetsForTimesWithZeroTimeout() {
        consumer = newConsumer();
        TopicPartition tp = new TopicPartition("topic1", 0);
        Map<TopicPartition, OffsetAndTimestamp> expectedResult = Collections.singletonMap(tp, null);
        Map<TopicPartition, Long> timestampToSearch = Collections.singletonMap(tp, 5L);
        Map<TopicPartition, OffsetAndTimestamp> result =
            assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ZERO));
        assertEquals(expectedResult, result);
        verify(applicationEventHandler, never()).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
    }
    // A pending wakeup fails committed() with WakeupException (the event future is already
    // completed exceptionally when addAndGet sees it) and clears the wakeup trigger.
    @Test
    public void testWakeupCommitted() {
        consumer = newConsumer();
        final Map<TopicPartition, OffsetAndMetadata> offsets = mockTopicPartitionOffset();
        doAnswer(invocation -> {
            CompletableApplicationEvent<?> event = invocation.getArgument(0);
            assertInstanceOf(FetchCommittedOffsetsEvent.class, event);
            assertTrue(event.future().isCompletedExceptionally());
            return ConsumerUtils.getResult(event.future());
        })
            .when(applicationEventHandler)
            .addAndGet(any(FetchCommittedOffsetsEvent.class));
        consumer.wakeup();
        assertThrows(WakeupException.class, () -> consumer.committed(offsets.keySet()));
        assertNull(consumer.wakeupTrigger().getPendingTask());
    }
    // A wakeup issued before close() must not abort the auto-commit performed during close:
    // the SyncCommitEvent is sent and its future is not completed exceptionally.
    @Test
    public void testNoWakeupInCloseCommit() {
        TopicPartition tp = new TopicPartition("topic1", 0);
        Properties props = requiredConsumerConfigAndGroupId("consumer-group");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        consumer = newConsumer(props);
        completeAssignmentChangeEventSuccessfully();
        consumer.assign(Collections.singleton(tp));
        doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
        completeSeekUnvalidatedEventSuccessfully();
        consumer.seek(tp, 10);
        consumer.wakeup();
        AtomicReference<SyncCommitEvent> capturedEvent = new AtomicReference<>();
        // Capture the close-time commit event and mark its offsets ready so close can proceed.
        doAnswer(invocation -> {
            ApplicationEvent event = invocation.getArgument(0);
            if (event instanceof SyncCommitEvent) {
                capturedEvent.set((SyncCommitEvent) event);
                ((SyncCommitEvent) event).markOffsetsReady();
            }
            return null;
        }).when(applicationEventHandler).add(any());
        completeUnsubscribeApplicationEventSuccessfully();
        consumer.close(CloseOptions.timeout(Duration.ZERO));
        // A commit was triggered and not completed exceptionally by the wakeup
        assertNotNull(capturedEvent.get());
        assertFalse(capturedEvent.get().future().isCompletedExceptionally());
    }
@Test
// Verifies that close() waits for a pending (never-completing) async commit and surfaces a
// TimeoutException (wrapped in KafkaException) when the close timeout elapses.
public void testCloseAwaitPendingAsyncCommitIncomplete() {
time = new MockTime(1);
consumer = newConsumer();
// Commit async (incomplete)
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
final TopicPartition tp = new TopicPartition("foo", 0);
completeAssignmentChangeEventSuccessfully();
consumer.assign(Collections.singleton(tp));
completeSeekUnvalidatedEventSuccessfully();
consumer.seek(tp, 20);
markOffsetsReadyForCommitEvent();
consumer.commitAsync();
Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10))));
assertInstanceOf(TimeoutException.class, e.getCause());
}
@Test
// Verifies that close() completes cleanly when the pending async commit finishes, and that the
// user's commit callback is invoked exactly once during close.
public void testCloseAwaitPendingAsyncCommitComplete() {
time = new MockTime(1);
consumer = newConsumer();
MockCommitCallback cb = new MockCommitCallback();
// Commit async (complete)
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
final TopicPartition tp = new TopicPartition("foo", 0);
completeAssignmentChangeEventSuccessfully();
consumer.assign(Collections.singleton(tp));
completeSeekUnvalidatedEventSuccessfully();
consumer.seek(tp, 20);
completeCommitAsyncApplicationEventSuccessfully();
consumer.commitAsync(cb);
completeUnsubscribeApplicationEventSuccessfully();
assertDoesNotThrow(() -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10))));
assertEquals(1, cb.invoked);
}
@Test
// Verifies that with auto-commit enabled, closing the consumer triggers the interceptor's
// onCommit callback once and also closes the interceptor.
public void testInterceptorAutoCommitOnClose() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitSyncApplicationEventSuccessfully();
completeUnsubscribeApplicationEventSuccessfully();
consumer.close(CloseOptions.timeout(Duration.ZERO));
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get());
}
@Test
// Verifies that a successful commitSync() invokes the interceptor's onCommit callback.
public void testInterceptorCommitSync() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitSyncApplicationEventSuccessfully();
consumer.commitSync(mockTopicPartitionOffset());
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
// Verifies that a failed commitSync() rethrows the original exception and does NOT invoke the
// interceptor's onCommit callback.
public void testNoInterceptorCommitSyncFailed() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
KafkaException expected = new KafkaException("Test exception");
completeCommitSyncApplicationEventExceptionally(expected);
KafkaException actual = assertThrows(KafkaException.class, () -> consumer.commitSync(mockTopicPartitionOffset()));
assertEquals(expected, actual);
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
// Verifies that for commitAsync() the interceptor's onCommit fires only once the commit
// callback is actually invoked (deferred), not at the time commitAsync() is called.
public void testInterceptorCommitAsync() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitAsyncApplicationEventSuccessfully();
consumer.commitAsync(mockTopicPartitionOffset(), new MockCommitCallback());
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
forceCommitCallbackInvocation();
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
// Verifies that a failed commitAsync() never invokes the interceptor's onCommit callback,
// even after the commit callbacks are forced to run.
public void testNoInterceptorCommitAsyncFailed() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitAsyncApplicationEventExceptionally(new KafkaException("Test exception"));
consumer.commitAsync(mockTopicPartitionOffset(), new MockCommitCallback());
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
forceCommitCallbackInvocation();
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
// Verifies that subscribe(List) updates the local subscription state and sends a blocking
// TopicSubscriptionChangeEvent to the background thread.
public void testSubscribeGeneratesEvent() {
consumer = newConsumer();
String topic = "topic1";
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList(topic));
assertEquals(singleton(topic), consumer.subscription());
assertTrue(consumer.assignment().isEmpty());
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicSubscriptionChangeEvent.class));
}
@Test
// Verifies that subscribe(Pattern) sends a blocking TopicPatternSubscriptionChangeEvent.
public void testSubscribePatternGeneratesEvent() {
consumer = newConsumer();
Pattern pattern = Pattern.compile("topic.*");
completeTopicPatternSubscriptionChangeEventSuccessfully();
consumer.subscribe(pattern);
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicPatternSubscriptionChangeEvent.class));
}
@Test
// Verifies that unsubscribe() clears subscription/assignment and enqueues an UnsubscribeEvent
// whose deadline does not exceed the default API timeout.
public void testUnsubscribeGeneratesUnsubscribeEvent() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
ArgumentCaptor<UnsubscribeEvent> eventCaptor = ArgumentCaptor.forClass(UnsubscribeEvent.class);
verify(applicationEventHandler).add(eventCaptor.capture());
// check the deadline is set to the default API timeout
long deadline = time.milliseconds() + (int) ConsumerConfig.configDef().defaultValues().get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
assertTrue(eventCaptor.getValue().deadlineMs() <= deadline);
}
@Test
// Verifies that subscribing to an empty topic list behaves like unsubscribe(): state is
// cleared and an UnsubscribeEvent (not a subscription-change event) is enqueued.
public void testSubscribeToEmptyListActsAsUnsubscribe() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.subscribe(Collections.emptyList());
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
verify(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class));
}
@Test
// Verifies that subscribe(null) is rejected with IllegalArgumentException.
public void testSubscribeToNullTopicCollection() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((List<String>) null));
}
@Test
// Verifies that a subscription list containing a null topic is rejected.
public void testSubscriptionOnNullTopic() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(null)));
}
@Test
// Verifies that a blank (whitespace-only) topic name in the subscription list is rejected.
public void testSubscriptionOnEmptyTopic() {
consumer = newConsumer();
String blankTopic = " ";
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(blankTopic)));
}
@Test
// Verifies that without a group.id, auto-commit-related configs are considered used but
// groupMetadata() throws InvalidGroupIdException with a descriptive message.
public void testGroupMetadataAfterCreationWithGroupIdIsNull() {
final Properties props = requiredConsumerConfig();
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertFalse(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
final Throwable exception = assertThrows(InvalidGroupIdException.class, consumer::groupMetadata);
assertEquals(
"To use the group management or offset commit APIs, you must " +
"provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.",
exception.getMessage()
);
}
@Test
// Verifies the initial group metadata for a consumer created with a group.id: correct group
// id, no instance id, and unknown generation/member ids before joining.
public void testGroupMetadataAfterCreationWithGroupIdIsNotNull() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertEquals(groupId, groupMetadata.groupId());
assertEquals(Optional.empty(), groupMetadata.groupInstanceId());
assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
@Test
// Same as the previous test, but with group.instance.id configured: the instance id must be
// reflected in the initial group metadata.
public void testGroupMetadataAfterCreationWithGroupIdIsNotNullAndGroupInstanceIdSet() {
final String groupId = "consumerGroupA";
final String groupInstanceId = "groupInstanceId1";
final Properties props = requiredConsumerConfigAndGroupId(groupId);
props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId);
consumer = newConsumer(props);
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertEquals(groupId, groupMetadata.groupId());
assertEquals(Optional.of(groupInstanceId), groupMetadata.groupInstanceId());
assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
// Captures the MemberStateListener passed by the consumer into RequestManagers.supplier(...)
// (15th positional argument), so tests can drive member-epoch updates directly.
// Requires RequestManagers to have been mocked statically before the consumer was created.
private MemberStateListener captureGroupMetadataUpdateListener(final MockedStatic<RequestManagers> requestManagers) {
ArgumentCaptor<MemberStateListener> applicationThreadMemberStateListener = ArgumentCaptor.forClass(MemberStateListener.class);
requestManagers.verify(() -> RequestManagers.supplier(
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
applicationThreadMemberStateListener.capture(),
any(),
any()
));
return applicationThreadMemberStateListener.getValue();
}
@Test
// Verifies that a member-epoch update delivered through the captured MemberStateListener is
// reflected in subsequent groupMetadata() results (member id and generation id change).
public void testGroupMetadataUpdate() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final ConsumerGroupMetadata oldGroupMetadata = consumer.groupMetadata();
final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
final int expectedMemberEpoch = 42;
final String expectedMemberId = "memberId";
groupMetadataUpdateListener.onMemberEpochUpdated(
Optional.of(expectedMemberEpoch),
expectedMemberId
);
final ConsumerGroupMetadata newGroupMetadata = consumer.groupMetadata();
assertEquals(oldGroupMetadata.groupId(), newGroupMetadata.groupId());
assertEquals(expectedMemberId, newGroupMetadata.memberId());
assertEquals(expectedMemberEpoch, newGroupMetadata.generationId());
assertEquals(oldGroupMetadata.groupInstanceId(), newGroupMetadata.groupInstanceId());
}
}
@SuppressWarnings("removal")
@Test
// Verifies that unsubscribe() resets group metadata back to unknown generation/member ids
// after a member-epoch update had previously populated them.
public void testGroupMetadataIsResetAfterUnsubscribe() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
consumer.subscribe(singletonList("topic"));
final int memberEpoch = 42;
final String memberId = "memberId";
groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId);
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
// The static mock is closed here; unsubscribe runs against the real RequestManagers path.
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
final ConsumerGroupMetadata groupMetadataAfterUnsubscribe = new ConsumerGroupMetadata(
groupId,
JoinGroupRequest.UNKNOWN_GENERATION_ID,
JoinGroupRequest.UNKNOWN_MEMBER_ID,
Optional.empty()
);
assertEquals(groupMetadataAfterUnsubscribe, consumer.groupMetadata());
}
// Captures the Optional<StreamsRebalanceData> passed into RequestManagers.supplier(...)
// (16th positional argument). Note: the raw ArgumentCaptor.forClass(Optional.class) produces
// an unchecked cast, which is accepted here for test brevity.
private Optional<StreamsRebalanceData> captureStreamRebalanceData(final MockedStatic<RequestManagers> requestManagers) {
ArgumentCaptor<Optional<StreamsRebalanceData>> streamRebalanceData = ArgumentCaptor.forClass(Optional.class);
requestManagers.verify(() -> RequestManagers.supplier(
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
streamRebalanceData.capture(),
any()
));
return streamRebalanceData.getValue();
}
@Test
// Verifies that a plain consumer (no streams rebalance data supplied) passes an empty
// Optional into RequestManagers.supplier.
public void testEmptyStreamRebalanceData() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final Optional<StreamsRebalanceData> groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers);
assertTrue(groupMetadataUpdateListener.isEmpty());
}
}
@Test
// Verifies that streams rebalance data supplied at construction is forwarded unchanged into
// RequestManagers.supplier.
public void testStreamRebalanceData() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of());
consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData);
final Optional<StreamsRebalanceData> groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers);
assertTrue(groupMetadataUpdateListener.isPresent());
assertEquals(streamsRebalanceData, groupMetadataUpdateListener.get());
}
}
/**
* Tests that the consumer correctly invokes the callbacks for {@link ConsumerRebalanceListener} that was
* specified. We don't go through the full effort to emulate heartbeats and correct group management here. We're
* simply exercising that the background {@link EventProcessor} does the correct thing when
* {@link AsyncKafkaConsumer#poll(Duration)} is called.
*
* Note that we test {@link ConsumerRebalanceListener} that throws errors in its different callbacks. Failed
* callback execution does <em>not</em> immediately raise an error. Instead, those errors are forwarded to the
* application event thread for the {@link ConsumerMembershipManager} to handle.
*/
@ParameterizedTest
@MethodSource("listenerCallbacksInvokeSource")
public void testListenerCallbacksInvoke(List<ConsumerRebalanceListenerMethodName> methodNames,
Optional<RuntimeException> revokedError,
Optional<RuntimeException> assignedError,
Optional<RuntimeException> lostError,
int expectedRevokedCount,
int expectedAssignedCount,
int expectedLostCount,
Optional<RuntimeException> expectedException
) {
consumer = newConsumer();
CounterConsumerRebalanceListener consumerRebalanceListener = new CounterConsumerRebalanceListener(
revokedError,
assignedError,
lostError
);
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(Collections.singletonList("topic"), consumerRebalanceListener);
SortedSet<TopicPartition> partitions = Collections.emptySortedSet();
// Enqueue one callback-needed background event per requested listener method.
for (ConsumerRebalanceListenerMethodName methodName : methodNames) {
CompletableBackgroundEvent<Void> e = new ConsumerRebalanceListenerCallbackNeededEvent(methodName, partitions);
backgroundEventQueue.add(e);
}
completeAsyncPollEventSuccessfully();
// This will trigger the background event queue to process our background event message.
// If any error occurs inside the rebalance callbacks, we expect the first exception to be thrown from poll.
if (expectedException.isPresent()) {
Exception exception = assertThrows(expectedException.get().getClass(), () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException.get().getMessage(), exception.getMessage());
assertEquals(expectedException.get().getCause(), exception.getCause());
} else {
assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
}
assertEquals(expectedRevokedCount, consumerRebalanceListener.revokedCount());
assertEquals(expectedAssignedCount, consumerRebalanceListener.assignedCount());
assertEquals(expectedLostCount, consumerRebalanceListener.lostCount());
}
// Argument rows for testListenerCallbacksInvoke:
// (methods to invoke, revoked/assigned/lost errors, expected revoked/assigned/lost counts, expected exception).
private static Stream<Arguments> listenerCallbacksInvokeSource() {
Optional<RuntimeException> empty = Optional.empty();
Optional<RuntimeException> error = Optional.of(new RuntimeException("Intentional error"));
Optional<RuntimeException> kafkaException = Optional.of(new KafkaException("Intentional error"));
Optional<RuntimeException> wrappedException = Optional.of(new KafkaException("User rebalance callback throws an error", error.get()));
return Stream.of(
// Tests if we don't have an event, the listener doesn't get called.
Arguments.of(Collections.emptyList(), empty, empty, empty, 0, 0, 0, empty),
// Tests if we get an event for a revocation, that we invoke our listener.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), empty, empty, empty, 1, 0, 0, empty),
// Tests if we get an event for an assignment, that we invoke our listener.
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, empty, empty, 0, 1, 0, empty),
// Tests if we get an event for partitions lost, that we invoke our listener.
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, empty, 0, 0, 1, empty),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), error, empty, empty, 1, 0, 0, wrappedException),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, error, empty, 0, 1, 0, wrappedException),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, error, 0, 0, 1, wrappedException),
// Tests that we invoke our listener even if it encounters an exception. Special case to test that a kafka exception is not wrapped.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), kafkaException, empty, empty, 1, 0, 0, kafkaException),
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, kafkaException, empty, 0, 1, 0, kafkaException),
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, kafkaException, 0, 0, 1, kafkaException),
// Tests if we get separate events for revocation and then assignment--AND our revocation throws an error--
// we still invoke the listeners correctly and throw the error.
Arguments.of(Arrays.asList(ON_PARTITIONS_REVOKED, ON_PARTITIONS_ASSIGNED), error, empty, empty, 1, 1, 0, wrappedException),
// Tests if we get separate events for revocation and then assignment--AND both throws an error--
// we still invoke the listeners correctly and throw the first error.
Arguments.of(Arrays.asList(ON_PARTITIONS_REVOKED, ON_PARTITIONS_ASSIGNED), kafkaException, error, empty, 1, 1, 0, kafkaException)
);
}
@Test
// Verifies that an ErrorEvent placed on the background event queue is surfaced to the caller
// as a KafkaException from poll().
public void testBackgroundError() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final KafkaException expectedException = new KafkaException("Nobody expects the Spanish Inquisition");
final ErrorEvent errorEvent = new ErrorEvent(expectedException);
backgroundEventQueue.add(errorEvent);
completeAssignmentChangeEventSuccessfully();
consumer.assign(singletonList(new TopicPartition("topic", 0)));
completeAsyncPollEventSuccessfully();
final KafkaException exception = assertThrows(KafkaException.class, () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException.getMessage(), exception.getMessage());
}
@Test
// Verifies that when multiple background ErrorEvents are queued, poll() throws the FIRST one
// and the queue is fully drained afterwards.
public void testMultipleBackgroundErrors() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final KafkaException expectedException1 = new KafkaException("Nobody expects the Spanish Inquisition");
final ErrorEvent errorEvent1 = new ErrorEvent(expectedException1);
backgroundEventQueue.add(errorEvent1);
final KafkaException expectedException2 = new KafkaException("Spam, Spam, Spam");
final ErrorEvent errorEvent2 = new ErrorEvent(expectedException2);
backgroundEventQueue.add(errorEvent2);
completeAssignmentChangeEventSuccessfully();
consumer.assign(singletonList(new TopicPartition("topic", 0)));
completeAsyncPollEventSuccessfully();
final KafkaException exception = assertThrows(KafkaException.class, () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException1.getMessage(), exception.getMessage());
assertTrue(backgroundEventQueue.isEmpty());
}
@Test
// Verifies that group.remote.assignor is reported as unused when no group.id is configured.
public void testGroupRemoteAssignorUnusedIfGroupIdUndefined() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertTrue(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG));
}
@Test
// Verifies that setting group.remote.assignor with the classic group protocol is a
// configuration error.
public void testGroupRemoteAssignorInClassicProtocol() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroupA");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
assertThrows(ConfigException.class, () -> new ConsumerConfig(props));
}
@Test
// Verifies that group.remote.assignor counts as used when group.id is set and the consumer
// group protocol is in effect.
public void testGroupRemoteAssignorUsedInConsumerProtocol() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroupA");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG));
}
@Test
// Verifies that auto-commit-interval and the throw-on-fetch-stable-offset flag are consumed
// (not reported unused) even when no group.id is configured.
public void testGroupIdNull() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10000);
props.put(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, true);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertFalse(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
}
@Test
// Counterpart of testGroupIdNull: with a valid group.id but auto-commit disabled, the
// auto-commit-interval and throw-on-fetch-stable-offset configs remain unused.
public void testGroupIdNotNullAndValid() {
final Properties props = requiredConsumerConfigAndGroupId("consumerGroupA");
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10000);
props.put(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, true);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertTrue(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertTrue(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
}
@Test
// Verifies that each call to poll() enqueues at least one AsyncPollEvent for the background
// thread. NOTE(review): the fetch stub is for partition "topic"-0 while the subscription is
// to "topic1" — presumably intentional since only the event dispatch is asserted; confirm.
public void testEnsurePollEventSentOnConsumerPoll() {
SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE);
consumer = newConsumer(
mock(FetchBuffer.class),
new ConsumerInterceptors<>(Collections.emptyList(), metrics),
mock(ConsumerRebalanceListenerInvoker.class),
subscriptions);
final TopicPartition tp = new TopicPartition("topic", 0);
final List<ConsumerRecord<String, String>> records = singletonList(
new ConsumerRecord<>("topic", 0, 2, "key1", "value1"));
doAnswer(invocation -> Fetch.forPartition(tp, records, true, new OffsetAndMetadata(3, Optional.of(0), "")))
.when(fetchCollector)
.collectFetch(Mockito.any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList("topic1"));
completeAsyncPollEventSuccessfully();
consumer.poll(Duration.ofMillis(100));
verify(applicationEventHandler, atLeastOnce()).add(any(AsyncPollEvent.class));
}
// Returns the minimal required consumer config with the given group.id applied on top.
private Properties requiredConsumerConfigAndGroupId(final String groupId) {
final Properties configWithGroup = requiredConsumerConfig();
configWithGroup.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
return configWithGroup;
}
@Test
// Verifies that a long poll() timeout does not block until the deadline: the first fetch
// returns empty (while the assignment arrives), the second returns records, and poll()
// returns promptly with those records and updated next offsets.
public void testLongPollWaitIsLimited() {
consumer = newConsumer();
String topicName = "topic1";
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList(topicName));
assertEquals(singleton(topicName), consumer.subscription());
assertTrue(consumer.assignment().isEmpty());
final int partition = 3;
final TopicPartition tp = new TopicPartition(topicName, partition);
final List<ConsumerRecord<String, String>> records = asList(
new ConsumerRecord<>(topicName, partition, 2, "key1", "value1"),
new ConsumerRecord<>(topicName, partition, 3, "key2", "value2")
);
final OffsetAndMetadata nextOffsetAndMetadata = new OffsetAndMetadata(4, Optional.of(0), "");
// On the first iteration, return no data; on the second, return two records
Set<TopicPartition> partitions = singleton(tp);
doAnswer(invocation -> {
// Mock the subscription being assigned as the first fetch is collected
consumer.subscriptions().assignFromSubscribed(partitions);
consumer.setGroupAssignmentSnapshot(partitions);
return Fetch.empty();
}).doAnswer(invocation ->
Fetch.forPartition(tp, records, true, nextOffsetAndMetadata)
).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeAsyncPollEventSuccessfully();
// And then poll for up to 10000ms, which should return 2 records without timing out
ConsumerRecords<?, ?> returnedRecords = consumer.poll(Duration.ofMillis(10000));
assertEquals(2, returnedRecords.count());
assertEquals(4, returnedRecords.nextOffsets().get(tp).offset());
assertEquals(Optional.of(0), returnedRecords.nextOffsets().get(tp).leaderEpoch());
assertEquals(singleton(topicName), consumer.subscription());
assertEquals(partitions, consumer.assignment());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} takes a bit of time to complete, but does within the timeout.
*/
@Test
public void testProcessBackgroundEventsWithInitialDelay() throws Exception {
consumer = newConsumer();
Timer timer = time.timer(1000);
CompletableFuture<?> future = mock(CompletableFuture.class);
CountDownLatch latch = new CountDownLatch(3);
// Mock our call to Future.get(timeout) so that it mimics a delay of 200 milliseconds. Keep in mind that
// the incremental timeout inside processBackgroundEvents is 100 milliseconds for each pass. Our first two
// passes will exceed the incremental timeout, but the third will return.
doAnswer(invocation -> {
latch.countDown();
if (latch.getCount() > 0) {
long timeout = invocation.getArgument(0, Long.class);
timer.sleep(timeout);
throw new java.util.concurrent.TimeoutException("Intentional timeout");
}
future.complete(null);
return null;
}).when(future).get(any(Long.class), any(TimeUnit.class));
consumer.processBackgroundEvents(future, timer, e -> false);
// 800 is the 1000 ms timeout (above) minus the 200 ms delay for the two incremental timeouts/retries.
assertEquals(800, timer.remainingMs());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} is already complete when invoked, so it doesn't have to wait.
*/
@Test
public void testProcessBackgroundEventsWithoutDelay() {
consumer = newConsumer();
Timer timer = time.timer(1000);
// Create a future that is already completed.
CompletableFuture<?> future = CompletableFuture.completedFuture(null);
consumer.processBackgroundEvents(future, timer, e -> false);
// Because we didn't need to perform a timed get, we should still have every last millisecond
// of our initial timeout.
assertEquals(1000, timer.remainingMs());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} does not complete within the timeout.
*/
@Test
public void testProcessBackgroundEventsTimesOut() throws Exception {
consumer = newConsumer();
Timer timer = time.timer(1000);
CompletableFuture<?> future = mock(CompletableFuture.class);
// Every timed get consumes the full incremental timeout and then times out.
doAnswer(invocation -> {
long timeout = invocation.getArgument(0, Long.class);
timer.sleep(timeout);
throw new java.util.concurrent.TimeoutException("Intentional timeout");
}).when(future).get(any(Long.class), any(TimeUnit.class));
assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer, e -> false));
// Because we forced our mocked future to continuously time out, we should have no time remaining.
assertEquals(0, timer.remainingMs());
}
/**
* Tests that calling {@link Thread#interrupt()} before {@link KafkaConsumer#poll(Duration)}
* causes {@link InterruptException} to be thrown, and that a subsequent poll (after the
* interrupt status is cleared) succeeds.
*/
@Test
public void testPollThrowsInterruptExceptionIfInterrupted() {
consumer = newConsumer();
final String topicName = "foo";
final int partition = 3;
final TopicPartition tp = new TopicPartition(topicName, partition);
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
completeAssignmentChangeEventSuccessfully();
consumer.assign(singleton(tp));
// interrupt the thread and call poll
try {
Thread.currentThread().interrupt();
completeAsyncPollEventSuccessfully();
assertThrows(InterruptException.class, () -> consumer.poll(Duration.ZERO));
} finally {
// clear interrupted state again since this thread may be reused by JUnit
Thread.interrupted();
}
assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
}
@Test
// Verifies that close() reaps any remaining background events from the queue.
void testReaperInvokedInClose() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.close();
verify(backgroundEventReaper).reap(backgroundEventQueue);
}
@Test
// Verifies that unsubscribe() triggers expiration-based reaping of background events.
void testReaperInvokedInUnsubscribe() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
verify(backgroundEventReaper).reap(time.milliseconds());
}
@Test
// Verifies that poll() triggers expiration-based reaping of background events.
void testReaperInvokedInPoll() {
consumer = newConsumer();
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(Collections.singletonList("topic"));
completeAsyncPollEventSuccessfully();
consumer.poll(Duration.ZERO);
verify(backgroundEventReaper).reap(time.milliseconds());
}
@Test
// Verifies that unsubscribe() works (and still sends an UnsubscribeEvent) for a consumer
// created without a group id.
public void testUnsubscribeWithoutGroupId() {
consumer = newConsumerWithoutGroupId();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
verify(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class));
}
@Test
// Verifies that seekToBeginning() enqueues a ResetOffsetEvent carrying the requested
// partitions and the EARLIEST reset strategy.
public void testSeekToBeginning() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
consumer.seekToBeginning(topics);
CompletableApplicationEvent<Void> event = addAndGetLastEnqueuedEvent();
ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event);
assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions()));
assertEquals(AutoOffsetResetStrategy.EARLIEST, resetOffsetEvent.offsetResetStrategy());
}
@Test
// Verifies that a failed ResetOffsetEvent makes seekToBeginning() throw the failure.
public void testSeekToBeginningWithException() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
completeResetOffsetEventExceptionally(new TimeoutException());
assertThrows(TimeoutException.class, () -> consumer.seekToBeginning(topics));
}
@Test
// Verifies that a failed ResetOffsetEvent makes seekToEnd() throw the failure.
public void testSeekToEndWithException() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
completeResetOffsetEventExceptionally(new TimeoutException());
assertThrows(TimeoutException.class, () -> consumer.seekToEnd(topics));
}
@Test
// Verifies that seekToEnd() enqueues a ResetOffsetEvent carrying the requested partitions
// and the LATEST reset strategy.
public void testSeekToEnd() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
consumer.seekToEnd(topics);
CompletableApplicationEvent<Void> event = addAndGetLastEnqueuedEvent();
ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event);
assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions()));
assertEquals(AutoOffsetResetStrategy.LATEST, resetOffsetEvent.offsetResetStrategy());
}
@Test
public void testSubscribeToRe2JPatternValidation() {
    // null and empty RE2/J patterns are rejected up front with a descriptive
    // message; a valid pattern is accepted, but a null listener is still invalid.
    consumer = newConsumer();
    Throwable thrown = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((SubscriptionPattern) null));
    assertEquals("Topic pattern to subscribe to cannot be null", thrown.getMessage());
    thrown = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern("")));
    assertEquals("Topic pattern to subscribe to cannot be empty", thrown.getMessage());
    assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*")));
    assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"), null));
    assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class)));
}
@Test
public void testSubscribeToRe2JPatternThrowsIfNoGroupId() {
    // Broker-side regex subscription requires a group id; both overloads
    // must reject the call when none is configured.
    consumer = newConsumer(requiredConsumerConfig());
    SubscriptionPattern pattern = new SubscriptionPattern("t*");
    assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(pattern));
    assertThrows(InvalidGroupIdException.class,
        () -> consumer.subscribe(pattern, mock(ConsumerRebalanceListener.class)));
}
// Both subscribe(SubscriptionPattern) overloads must go through the blocking
// addAndGet path with a TopicRe2JPatternSubscriptionChangeEvent.
@Test
public void testSubscribeToRe2JPatternGeneratesEvent() {
    consumer = newConsumer();
    completeTopicRe2JPatternSubscriptionChangeEventSuccessfully();
    consumer.subscribe(new SubscriptionPattern("t*"));
    verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class));
    // Reset mock bookkeeping so the second overload is verified independently.
    clearInvocations(applicationEventHandler);
    consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class));
    verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class));
}
// SubscriptionPattern is supported as of ConsumerGroupHeartbeatRequest v1. Clients using subscribe
// (SubscribePattern) against older broker versions should get UnsupportedVersionException on poll after subscribe
@Test
public void testSubscribePatternAgainstBrokerNotSupportingRegex() throws InterruptedException {
    final Properties props = requiredConsumerConfig();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    final ConsumerConfig config = new ConsumerConfig(props);
    // Real metadata + MockClient instead of the usual mocked handler: the
    // version check happens in the background request path.
    ConsumerMetadata metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false,
        mock(SubscriptionState.class), new LogContext(), new ClusterResourceListeners());
    MockClient client = new MockClient(time, metadata);
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(1, Map.of("topic1", 2),
        Map.of("topic1", Uuid.randomUuid()));
    client.updateMetadata(initialMetadata);
    // ConsumerGroupHeartbeat v0 does not support broker-side regex resolution
    client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.CONSUMER_GROUP_HEARTBEAT.id, (short) 0, (short) 0));
    // Mock response to find coordinator
    Node node = metadata.fetch().nodes().get(0);
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node), node);
    // Mock HB response (needed so that the MockClient builds the request)
    ConsumerGroupHeartbeatResponse result =
        new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData()
            .setMemberId("")
            .setMemberEpoch(0));
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(result, coordinator);
    SubscriptionState subscriptionState = mock(SubscriptionState.class);
    consumer = new AsyncKafkaConsumer<>(
        new LogContext(),
        time,
        config,
        new StringDeserializer(),
        new StringDeserializer(),
        client,
        subscriptionState,
        metadata
    );
    completeTopicRe2JPatternSubscriptionChangeEventSuccessfully();
    SubscriptionPattern pattern = new SubscriptionPattern("t*");
    consumer.subscribe(pattern);
    when(subscriptionState.subscriptionPattern()).thenReturn(pattern);
    TestUtils.waitForCondition(() -> {
        try {
            // The request is generated in the background thread so allow for that
            // async operation to happen to detect the failure.
            consumer.poll(Duration.ZERO);
            return false;
        } catch (UnsupportedVersionException e) {
            return true;
        }
    }, "Consumer did not throw the expected UnsupportedVersionException on poll");
}
// The async consumer metrics must track both the background event queue depth
// and how long events sat in the queue before being processed.
@Test
public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() {
    consumer = newConsumer(
        mock(FetchBuffer.class),
        mock(ConsumerInterceptors.class),
        mock(ConsumerRebalanceListenerInvoker.class),
        mock(SubscriptionState.class));
    Metrics metrics = consumer.metricsRegistry();
    AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics();
    ConsumerRebalanceListenerCallbackNeededEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, Collections.emptySortedSet());
    // Stamp the enqueue time, then advance the clock so queue time is non-zero.
    event.setEnqueuedMs(time.milliseconds());
    backgroundEventQueue.add(event);
    asyncConsumerMetrics.recordBackgroundEventQueueSize(1);
    time.sleep(10);
    consumer.processBackgroundEvents();
    // After processing: queue drained back to 0 and both avg/max queue-time
    // metrics reflect the simulated 10ms wait.
    assertEquals(0, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP)).metricValue());
    assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP)).metricValue());
    assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP)).metricValue());
}
// A bad metrics-reporter class must fail construction with a wrapped
// KafkaException, and the cleanup path must not log a spurious NPE.
@Test
public void testFailConstructor() {
    final Properties props = requiredConsumerConfig();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id");
    props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class");
    final ConsumerConfig config = new ConsumerConfig(props);
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        KafkaException ce = assertThrows(
            KafkaException.class,
            () -> newConsumer(config));
        assertTrue(ce.getMessage().contains("Failed to construct kafka consumer"), "Unexpected exception message: " + ce.getMessage());
        assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause());
        // Regression check: constructor failure cleanup must not trip over
        // partially-initialized fields and log a NullPointerException.
        boolean npeLogged = appender.getEvents().stream()
            .flatMap(event -> event.getThrowableInfo().stream())
            .anyMatch(str -> str.contains("NullPointerException"));
        assertFalse(npeLogged, "Unexpected NullPointerException during consumer construction");
    }
}
// Fixture: two partitions of topic "t0" with distinct committed offsets.
private Map<TopicPartition, OffsetAndMetadata> mockTopicPartitionOffset() {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(new TopicPartition("t0", 2), new OffsetAndMetadata(10L));
    offsets.put(new TopicPartition("t0", 3), new OffsetAndMetadata(20L));
    return offsets;
}
// Fixture: offset/timestamp pairs for the same two "t0" partitions used above.
private Map<TopicPartition, OffsetAndTimestampInternal> mockOffsetAndTimestamp() {
    Map<TopicPartition, OffsetAndTimestampInternal> result = new HashMap<>();
    result.put(new TopicPartition("t0", 2), new OffsetAndTimestampInternal(5L, 1L, Optional.empty()));
    result.put(new TopicPartition("t0", 3), new OffsetAndTimestampInternal(6L, 3L, Optional.empty()));
    return result;
}
// Fixture: target timestamps for offsetsForTimes()-style lookups.
private Map<TopicPartition, Long> mockTimestampToSearch() {
    Map<TopicPartition, Long> timestamps = new HashMap<>();
    timestamps.put(new TopicPartition("t0", 2), 1L);
    timestamps.put(new TopicPartition("t0", 3), 2L);
    return timestamps;
}
// Stub the handler so any AsyncCommitEvent it receives fails with the given exception.
private void completeCommitAsyncApplicationEventExceptionally(Exception ex) {
    doAnswer(invocation -> {
        AsyncCommitEvent event = invocation.getArgument(0);
        // Mark offsets resolved first so the consumer proceeds to read the future.
        event.markOffsetsReady();
        event.future().completeExceptionally(ex);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(AsyncCommitEvent.class));
}
// Stub the handler so any SyncCommitEvent it receives fails with the given exception.
private void completeCommitSyncApplicationEventExceptionally(Exception ex) {
    doAnswer(invocation -> {
        SyncCommitEvent event = invocation.getArgument(0);
        event.markOffsetsReady();
        event.future().completeExceptionally(ex);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(SyncCommitEvent.class));
}
// Stub the blocking addAndGet path so a ResetOffsetEvent throws the given exception.
private void completeResetOffsetEventExceptionally(Exception ex) {
    doThrow(ex).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ResetOffsetEvent.class));
}
// Stub the handler so any AsyncCommitEvent it receives completes successfully.
private void completeCommitAsyncApplicationEventSuccessfully() {
    doAnswer(invocation -> {
        AsyncCommitEvent event = invocation.getArgument(0);
        event.markOffsetsReady();
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(AsyncCommitEvent.class));
}
// Stub the handler so any SyncCommitEvent it receives completes successfully.
private void completeCommitSyncApplicationEventSuccessfully() {
    doAnswer(invocation -> {
        SyncCommitEvent event = invocation.getArgument(0);
        event.markOffsetsReady();
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(SyncCommitEvent.class));
}
// Stub both submission paths for FetchCommittedOffsetsEvent to yield the
// supplied committed offsets: blocking addAndGet returns them directly and
// fire-and-forget add completes the event's future with them.
private void completeFetchedCommittedOffsetApplicationEventSuccessfully(final Map<TopicPartition, OffsetAndMetadata> committedOffsets) {
    doReturn(committedOffsets)
        .when(applicationEventHandler)
        .addAndGet(any(FetchCommittedOffsetsEvent.class));
    doAnswer(invocation -> {
        FetchCommittedOffsetsEvent event = invocation.getArgument(0);
        event.future().complete(committedOffsets);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(FetchCommittedOffsetsEvent.class));
}
// Stub the blocking addAndGet path so fetching committed offsets throws the given exception.
private void completeFetchedCommittedOffsetApplicationEventExceptionally(Exception ex) {
    doThrow(ex)
        .when(applicationEventHandler)
        .addAndGet(any(FetchCommittedOffsetsEvent.class));
}
// Stub the handler so an UnsubscribeEvent both clears the local subscription
// state (mimicking the background thread's side effect) and completes successfully.
private void completeUnsubscribeApplicationEventSuccessfully() {
    doAnswer(invocation -> {
        UnsubscribeEvent event = invocation.getArgument(0);
        consumer.subscriptions().unsubscribe();
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class));
}
// Stub the handler so an AssignmentChangeEvent applies the user assignment to
// the local subscription state and completes successfully.
private void completeAssignmentChangeEventSuccessfully() {
    doAnswer(invocation -> {
        AssignmentChangeEvent event = invocation.getArgument(0);
        HashSet<TopicPartition> partitions = new HashSet<>(event.partitions());
        consumer.subscriptions().assignFromUser(partitions);
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(AssignmentChangeEvent.class));
}
// Stub the handler so a topic-list subscription event updates the local
// subscription state and completes successfully.
private void completeTopicSubscriptionChangeEventSuccessfully() {
    doAnswer(invocation -> {
        TopicSubscriptionChangeEvent event = invocation.getArgument(0);
        consumer.subscriptions().subscribe(event.topics(), event.listener());
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicSubscriptionChangeEvent.class));
}
// Stub the handler so a java.util.regex pattern subscription event updates the
// local subscription state and completes successfully.
private void completeTopicPatternSubscriptionChangeEventSuccessfully() {
    doAnswer(invocation -> {
        TopicPatternSubscriptionChangeEvent event = invocation.getArgument(0);
        consumer.subscriptions().subscribe(event.pattern(), event.listener());
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicPatternSubscriptionChangeEvent.class));
}
// Stub the handler so an RE2/J (broker-side regex) subscription event updates
// the local subscription state and completes successfully.
private void completeTopicRe2JPatternSubscriptionChangeEventSuccessfully() {
    doAnswer(invocation -> {
        TopicRe2JPatternSubscriptionChangeEvent event = invocation.getArgument(0);
        consumer.subscriptions().subscribe(event.pattern(), event.listener());
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class));
}
// Stub the handler so a SeekUnvalidatedEvent builds the new fetch position
// (offset, optional leader epoch, current leader) and applies it to the local
// subscription state, mimicking the background thread, before completing.
private void completeSeekUnvalidatedEventSuccessfully() {
    doAnswer(invocation -> {
        SeekUnvalidatedEvent event = invocation.getArgument(0);
        SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition(
            event.offset(),
            event.offsetEpoch(),
            metadata.currentLeader(event.partition())
        );
        consumer.subscriptions().seekUnvalidated(event.partition(), newPosition);
        event.future().complete(null);
        return null;
    }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(SeekUnvalidatedEvent.class));
}
// Trigger execution of any pending commit callbacks: commitAsync() drains the
// completed-callback queue as a side effect.
private void forceCommitCallbackInvocation() {
    // Invokes callback
    consumer.commitAsync();
}
// Stub the handler so any CommitEvent has its offsets marked as resolved,
// without completing the event's future (test controls completion separately).
private void markOffsetsReadyForCommitEvent() {
    doAnswer(invocation -> {
        CommitEvent event = invocation.getArgument(0);
        event.markOffsetsReady();
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(CommitEvent.class));
}
// Closing a streams consumer whose member epoch is positive (active member)
// must fire the streams listener's onTasksRevoked callback.
@Test
public void testCloseInvokesStreamsRebalanceListenerOnTasksRevokedWhenMemberEpochPositive() {
    final String groupId = "streamsGroup";
    final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of());
    try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
        consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData);
        StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class);
        consumer.subscribe(singletonList("topic"), mockStreamsListener);
        // Capture the listener registered with the request managers so the test
        // can push a membership update directly.
        final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
        final int memberEpoch = 42;
        final String memberId = "memberId";
        groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId);
        consumer.close(CloseOptions.timeout(Duration.ZERO));
        verify(mockStreamsListener).onTasksRevoked(any());
    }
}
// Closing a streams consumer whose member epoch is zero (or negative, i.e.
// not an active member) must fire onAllTasksLost instead of onTasksRevoked.
@Test
public void testCloseInvokesStreamsRebalanceListenerOnAllTasksLostWhenMemberEpochZeroOrNegative() {
    final String groupId = "streamsGroup";
    final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of());
    try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
        consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData);
        StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class);
        consumer.subscribe(singletonList("topic"), mockStreamsListener);
        final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
        final int memberEpoch = 0;
        final String memberId = "memberId";
        groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId);
        consumer.close(CloseOptions.timeout(Duration.ZERO));
        verify(mockStreamsListener).onAllTasksLost();
    }
}
// An exception thrown from the streams listener during close() must be wrapped
// in a KafkaException with the original exception preserved as the cause.
@Test
public void testCloseWrapsStreamsRebalanceListenerException() {
    final String groupId = "streamsGroup";
    final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of());
    try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
        consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData);
        StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class);
        RuntimeException testException = new RuntimeException("Test streams listener exception");
        doThrow(testException).when(mockStreamsListener).onTasksRevoked(any());
        consumer.subscribe(singletonList("topic"), mockStreamsListener);
        final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
        // Positive epoch selects the onTasksRevoked path, which is stubbed to throw.
        final int memberEpoch = 1;
        final String memberId = "memberId";
        groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId);
        KafkaException thrownException = assertThrows(KafkaException.class,
            () -> consumer.close(CloseOptions.timeout(Duration.ZERO)));
        assertInstanceOf(RuntimeException.class, thrownException.getCause());
        assertTrue(thrownException.getCause().getMessage().contains("Test streams listener exception"));
        verify(mockStreamsListener).onTasksRevoked(any());
    }
}
// Stub the handler so any AsyncPollEvent it receives completes successfully.
private void completeAsyncPollEventSuccessfully() {
    doAnswer(invocation -> {
        AsyncPollEvent event = invocation.getArgument(0);
        event.completeSuccessfully();
        return null;
    }).when(applicationEventHandler).add(ArgumentMatchers.isA(AsyncPollEvent.class));
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
    # One journal item: a single debit/credit line of an accounting entry
    # (account.move). The methods below implement computed fields, default
    # value proposals and search helpers for this model.
    _name = "account.move.line"
    _description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
    """Build the SQL WHERE fragment (returned as a string) that filters
    journal items according to the report filters found in ``context``:
    fiscal year, periods, date range, move state, journals, company and
    chart of accounts.

    :param obj: SQL alias of the account_move_line table in the caller's query
    :return: SQL condition string to append to the caller's WHERE clause

    NOTE(review): filter values from the context are interpolated straight
    into the SQL string; presumably they are always ids/dates produced by
    trusted server-side wizards -- verify that no caller forwards raw user
    input here.
    """
    fiscalyear_obj = self.pool.get('account.fiscalyear')
    fiscalperiod_obj = self.pool.get('account.period')
    account_obj = self.pool.get('account.account')
    fiscalyear_ids = []
    context = dict(context or {})
    initial_bal = context.get('initial_bal', False)
    company_clause = " "
    if context.get('company_id', False):
        company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
    if not context.get('fiscalyear', False):
        if context.get('all_fiscalyear', False):
            #this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
            fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
        else:
            fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
    else:
        #for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
        fiscalyear_ids = [context['fiscalyear']]
    fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
    state = context.get('state', False)
    where_move_state = ''
    where_move_lines_by_date = ''
    if context.get('date_from', False) and context.get('date_to', False):
        if initial_bal:
            # initial balance mode: everything strictly before the start date
            where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
        else:
            where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
    if state:
        if state.lower() not in ['all']:
            where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
    if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
        if initial_bal:
            # expand to every period from the company's very first period up
            # to the selected starting period
            period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
            first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
            context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
        else:
            context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
    if context.get('periods', False):
        if initial_bal:
            query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
            period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
            if period_ids and period_ids[0]:
                # restrict to periods starting before the earliest selected one
                first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
                ids = ','.join([str(x) for x in context['periods']])
                query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
        else:
            ids = ','.join([str(x) for x in context['periods']])
            query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
    else:
        query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
    if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
        #we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
        #so we have to invalidate this query
        raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
    if context.get('journal_ids', False):
        query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
    if context.get('chart_account_id', False):
        child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
        query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
    query += company_clause
    return query
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
    """
    This function returns the residual amount on a receivable or payable account.move.line.
    By default, it returns an amount in the currency of this journal entry (maybe different
    of the company currency), but if you pass 'residual_in_company_currency' = True in the
    context then the returned amount will be in company currency.

    :return: {line_id: {'amount_residual': float, 'amount_residual_currency': float}}
             both values are 0.0 for fully reconciled or non-reconcilable lines
    """
    res = {}
    if context is None:
        context = {}
    cur_obj = self.pool.get('res.currency')
    for move_line in self.browse(cr, uid, ids, context=context):
        res[move_line.id] = {
            'amount_residual': 0.0,
            'amount_residual_currency': 0.0,
        }
        if move_line.reconcile_id:
            # fully reconciled: nothing left to pay
            continue
        if not move_line.account_id.reconcile:
            #this function does not support to be used on move lines not related to a reconcilable account
            continue
        if move_line.currency_id:
            move_line_total = move_line.amount_currency
            sign = move_line.amount_currency < 0 and -1 or 1
        else:
            move_line_total = move_line.debit - move_line.credit
            sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
        line_total_in_company_currency = move_line.debit - move_line.credit
        context_unreconciled = context.copy()
        if move_line.reconcile_partial_id:
            # fold every partial payment already matched against this line
            # into the running totals
            for payment_line in move_line.reconcile_partial_id.line_partial_ids:
                if payment_line.id == move_line.id:
                    continue
                if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
                    move_line_total += payment_line.amount_currency
                else:
                    if move_line.currency_id:
                        # convert the payment into the line's currency at the
                        # payment date
                        context_unreconciled.update({'date': payment_line.date})
                        amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
                        move_line_total += amount_in_foreign_currency
                    else:
                        move_line_total += (payment_line.debit - payment_line.credit)
                line_total_in_company_currency += (payment_line.debit - payment_line.credit)
        result = move_line_total
        res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
        res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
    return res
def default_get(self, cr, uid, fields, context=None):
    """Return default values restricted to the requested ``fields``.

    _default_get() may compute extra keys (e.g. to balance the current
    move); anything the caller did not explicitly ask for is dropped here.
    """
    defaults = self._default_get(cr, uid, fields, context=context)
    return dict((key, value) for key, value in defaults.items() if key in fields)
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
    """
    Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
    an analytic account. This method is intended to be extended in other modules.

    :param obj_line: browse record of the account.move.line that triggered the analytic line creation
    :return: dict of values for account.analytic.line.create()
    """
    return {'name': obj_line.name,
            'date': obj_line.date,
            'account_id': obj_line.analytic_account_id.id,
            'unit_amount': obj_line.quantity,
            'product_id': obj_line.product_id and obj_line.product_id.id or False,
            'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
            # analytic amount is signed opposite to the move line: credits
            # become positive analytic amounts
            'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
            'general_account_id': obj_line.account_id.id,
            'journal_id': obj_line.journal_id.analytic_journal_id.id,
            'ref': obj_line.ref,
            'move_id': obj_line.id,
            'user_id': uid,
            }
def create_analytic_lines(self, cr, uid, ids, context=None):
    """(Re)generate the analytic lines of the given move lines: any existing
    analytic lines are deleted first, then recreated for lines carrying an
    analytic account. Raises when the line's journal has no analytic
    journal configured.
    """
    acc_ana_line_obj = self.pool.get('account.analytic.line')
    for obj_line in self.browse(cr, uid, ids, context=context):
        if obj_line.analytic_lines:
            # drop stale analytic lines before recomputing them
            acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
        if obj_line.analytic_account_id:
            if not obj_line.journal_id.analytic_journal_id:
                raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
            vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
            acc_ana_line_obj.create(cr, uid, vals_line)
    return True
def _default_get_move_form_hook(self, cursor, user, data):
    """Called at the end of the default_get method for manual entry in the
    account_move form: strip fields that must not be proposed as defaults.

    :param data: default values dict; modified in place and returned
    """
    # dict.pop(key, None) replaces the deprecated has_key()/del pair with a
    # single call; behavior is identical (silently ignores missing keys).
    data.pop('analytic_account_id', None)
    data.pop('account_tax_id', None)
    return data
def convert_to_period(self, cr, uid, context=None):
    """Normalize the ``period_id`` value possibly carried by ``context``.

    The client may send the period as its display name (a string) instead
    of a database id; in that case look the period up by name and replace
    the value with the matching id (or False if none matches). Returns a
    (possibly new) context dict; the caller's dict is not mutated when a
    conversion happens.
    """
    if context is None:
        context = {}
    period_obj = self.pool.get('account.period')
    #check if the period_id changed in the context from client side
    if context.get('period_id', False):
        period_id = context.get('period_id')
        # isinstance(..., basestring) also matches unicode names sent by the
        # client, which the previous ``type(period_id) == str`` check missed
        if isinstance(period_id, basestring):
            ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
            context = dict(context, period_id=ids and ids[0] or False)
    return context
def _default_get(self, cr, uid, fields, context=None):
    """Compute default values for a manually-encoded journal item.

    Beyond the standard defaults, this proposes the debit or credit amount
    needed to balance the move being entered, and the matching default
    account from the journal.
    """
    #default_get should only do the following:
    #   -propose the next amount in debit/credit in order to balance the move
    #   -propose the next account from the journal (default debit/credit account) accordingly
    context = dict(context or {})
    account_obj = self.pool.get('account.account')
    period_obj = self.pool.get('account.period')
    journal_obj = self.pool.get('account.journal')
    move_obj = self.pool.get('account.move')
    tax_obj = self.pool.get('account.tax')
    fiscal_pos_obj = self.pool.get('account.fiscal.position')
    partner_obj = self.pool.get('res.partner')
    currency_obj = self.pool.get('res.currency')
    # fall back on the search filters when the journal/period are not set
    # explicitly in the context
    if not context.get('journal_id', False):
        context['journal_id'] = context.get('search_default_journal_id', False)
    if not context.get('period_id', False):
        context['period_id'] = context.get('search_default_period_id', False)
    context = self.convert_to_period(cr, uid, context)
    # Compute simple values
    data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
    if context.get('journal_id'):
        total = 0.0
        #in account.move form view, it is not possible to compute total debit and credit using
        #a browse record. So we must use the context to pass the whole one2many field and compute the total
        if context.get('line_id'):
            for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
                data['name'] = data.get('name') or move_line_dict.get('name')
                data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
                total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
        elif context.get('period_id'):
            #find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
            move_id = False
            cr.execute('''SELECT move_id, date FROM account_move_line
                WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
                ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
            res = cr.fetchone()
            move_id = res and res[0] or False
            data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
            data['move_id'] = move_id
            if move_id:
                #if there exist some unbalanced accounting entries that match the journal and the period,
                #we propose to continue the same move by copying the ref, the name, the partner...
                move = move_obj.browse(cr, uid, move_id, context=context)
                data.setdefault('name', move.line_id[-1].name)
                for l in move.line_id:
                    data['partner_id'] = data.get('partner_id') or l.partner_id.id
                    data['ref'] = data.get('ref') or l.ref
                    total += (l.debit or 0.0) - (l.credit or 0.0)
        #compute the total of current move
        data['debit'] = total < 0 and -total or 0.0
        data['credit'] = total > 0 and total or 0.0
        #pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
        journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
        account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
        #map the account using the fiscal position of the partner, if needed
        if isinstance(data.get('partner_id'), (int, long)):
            part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
        elif isinstance(data.get('partner_id'), (tuple, list)):
            # client may send (id, display_name) pairs
            part = partner_obj.browse(cr, uid, data['partner_id'][0], context=context)
        else:
            part = False
        if account and part:
            account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
            account = account_obj.browse(cr, uid, account, context=context)
        data['account_id'] = account and account.id or False
        #compute the amount in secondary currency of the account, if needed
        if account and account.currency_id:
            data['currency_id'] = account.currency_id.id
            #set the context for the multi currency change
            compute_ctx = context.copy()
            compute_ctx.update({
                #the following 2 parameters are used to choose the currency rate, in case where the account
                #doesn't work with an outgoing currency rate method 'at date' but 'average'
                'res.currency.compute.account': account,
                'res.currency.compute.account_invert': True,
            })
            if data.get('date'):
                compute_ctx.update({'date': data['date']})
            data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
    data = self._default_get_move_form_hook(cr, uid, data)
    return data
def on_create_write(self, cr, uid, id, context=None):
    """Return the ids of every line belonging to the same move as ``id``
    (so the client can refresh the sibling lines), or [] when no id is given.
    """
    if not id:
        return []
    line = self.browse(cr, uid, id, context=context)
    return [sibling.id for sibling in line.move_id.line_id]
def _balance(self, cr, uid, ids, name, arg, context=None):
    """Function field: cumulative balance per line -- the sum of debit-credit
    over all lines on the same account with a lower or equal id, restricted
    by the context filters applied through _query_get().
    """
    if context is None:
        context = {}
    c = context.copy()
    # NOTE(review): this key is misspelled -- _query_get() reads
    # 'initial_bal', so the flag below is silently ignored and the
    # initial-balance branches of _query_get() never run from here.
    # Confirm the intended semantics before renaming it: fixing the typo
    # changes the computed balance and can trigger _query_get()'s
    # "not enough arguments" error when no period/date filter is set.
    c['initital_bal'] = True
    sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
                FROM account_move_line l1 LEFT JOIN account_move_line l2
                ON (l1.account_id = l2.account_id
                AND l2.id <= l1.id
                AND """ + \
            self._query_get(cr, uid, obj='l2', context=c) + \
            ") WHERE l1.id IN %s GROUP BY l1.id"
    cr.execute(sql, [tuple(ids)])
    return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
    """Function field: the invoice each move line belongs to, found through
    the shared account.move.

    :return: {line_id: (invoice_id, invoice_display_name) or False}
    """
    invoice_obj = self.pool.get('account.invoice')
    res = {}
    # default every requested line to False (no invoice)
    for line_id in ids:
        res[line_id] = False
    cursor.execute('SELECT l.id, i.id ' \
        'FROM account_move_line l, account_invoice i ' \
        'WHERE l.move_id = i.move_id ' \
        'AND l.id IN %s',
        (tuple(ids),))
    invoice_ids = []
    for line_id, invoice_id in cursor.fetchall():
        res[line_id] = invoice_id
        invoice_ids.append(invoice_id)
    # resolve display names in one batch, then rewrite the ids as
    # (id, name) pairs expected by many2one function fields
    invoice_names = {}
    for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
        invoice_names[invoice_id] = name
    for line_id in res.keys():
        invoice_id = res[line_id]
        res[line_id] = invoice_id and (invoice_id, invoice_names[invoice_id]) or False
    return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
    def _invoice_search(self, cursor, user, obj, name, args, context=None):
        """fnct_search for the computed 'invoice' field.

        Rewrites each domain leaf on 'invoice' into an 'in' condition on
        invoice ids (resolving dotted sub-fields and name_search on strings),
        then joins move lines to invoices through move_id and returns an
        id-list domain.  NOTE: `args` is mutated in place.
        """
        if not args:
            return []
        invoice_obj = self.pool.get('account.invoice')
        i = 0
        while i < len(args):
            fargs = args[i][0].split('.', 1)
            # 'invoice.some_field' leaf: delegate the sub-condition to an
            # account.invoice search and keep only the resulting ids.
            if len(fargs) > 1:
                args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
                    [(fargs[1], args[i][1], args[i][2])]))
                i += 1
                continue
            # String value: resolve it through name_search (same behaviour as
            # typing in a many2one widget).
            if isinstance(args[i][2], basestring):
                res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
                    args[i][1])
                args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
            i += 1
        # Build the WHERE fragment (qu1) and its parameters (qu2) from the
        # normalized leaves.
        qu1, qu2 = [], []
        for x in args:
            if x[1] != 'in':
                if (x[2] is False) and (x[1] == '='):
                    qu1.append('(i.id IS NULL)')
                elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
                    qu1.append('(i.id IS NOT NULL)')
                else:
                    qu1.append('(i.id %s %s)' % (x[1], '%s'))
                    qu2.append(x[2])
            elif x[1] == 'in':
                if len(x[2]) > 0:
                    qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
                    qu2 += x[2]
                else:
                    # Empty 'in' list can never match.
                    qu1.append(' (False)')
        if qu1:
            qu1 = ' AND' + ' AND'.join(qu1)
        else:
            qu1 = ''
        cursor.execute('SELECT l.id ' \
                'FROM account_move_line l, account_invoice i ' \
                'WHERE l.move_id = i.move_id ' + qu1, qu2)
        res = cursor.fetchall()
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('move_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
context or {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
    def _get_journal(self, cr, uid, context=None):
        """
        Return journal based on the journal type

        Uses the context's journal_id when given; otherwise picks the first
        journal of the context's journal_type, raising a redirect warning to
        the journal configuration if none exists.
        """
        context = context or {}
        if context.get('journal_id', False):
            return context['journal_id']
        journal_id = False
        journal_pool = self.pool.get('account.journal')
        if context.get('journal_type', False):
            jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
            if not jids:
                # No journal of that type: send the user to the journal
                # configuration action instead of failing silently.
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
                msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            journal_id = jids[0]
        return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
    def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
        """When the partner changes, propose a maturity date (from the
        partner's payment term) and, if no account was chosen yet, a
        payable/receivable account mapped through the partner's fiscal
        position and driven by the journal type."""
        partner_obj = self.pool.get('res.partner')
        payment_term_obj = self.pool.get('account.payment.term')
        journal_obj = self.pool.get('account.journal')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        val = {}
        val['date_maturity'] = False

        if not partner_id:
            return {'value':val}
        if not date:
            date = datetime.now().strftime('%Y-%m-%d')
        jt = False
        if journal:
            jt = journal_obj.browse(cr, uid, journal, context=context).type
        part = partner_obj.browse(cr, uid, partner_id, context=context)

        # Pick the supplier payment term for purchase journals, otherwise the
        # customer one; compute the last installment date as maturity.
        payment_term_id = False
        if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
            payment_term_id = part.property_supplier_payment_term.id
        elif jt and part.property_payment_term:
            payment_term_id = part.property_payment_term.id
        if payment_term_id:
            res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
            if res:
                val['date_maturity'] = res[0][0]
        if not account_id:
            id1 = part.property_account_payable.id
            id2 =  part.property_account_receivable.id
            if jt:
                # Sales side -> receivable, purchase side -> payable; for
                # general/bank/cash journals decide on the partner's
                # customer/supplier flags.
                if jt in ('sale', 'purchase_refund'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
                elif jt in ('purchase', 'sale_refund'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
                elif jt in ('general', 'bank', 'cash'):
                    if part.customer:
                        val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
                    elif part.supplier:
                        val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
                if val.get('account_id', False):
                    # Cascade the account onchange so the default tax follows.
                    d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
                    val.update(d['value'])
        return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
    #
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
    #
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Extend search with two context-driven filters: restrict to a fiscal
        year, and (for the reconciliation workflow) restrict to a single
        partner's lines."""
        if context is None:
            context = {}
        if context.get('fiscalyear'):
            args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
        if context and context.get('next_partner_only', False):
            if not context.get('partner_id', False):
                partner = self.list_partners_to_reconcile(cr, uid, context=context)
                if partner:
                    partner = partner[0]
            else:
                partner = context.get('partner_id', False)
            if not partner:
                return []
            # NOTE(review): `partner` is expected to be a name_get-style
            # (id, name) tuple here — a bare id in context['partner_id']
            # would make partner[0] wrong; verify callers.
            args.append(('partner_id', '=', partner[0]))
        return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
    def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
        """ Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: curreny you want the move line debit/credit converted into
            :param target_date: date to use for the monetary conversion
        """
        if not lines:
            return []
        if context is None:
            context = {}
        ctx = context.copy()
        currency_obj = self.pool.get('res.currency')
        company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
        rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
        ret = []

        for line in lines:
            # Other lines sharing this line's partial reconciliation.
            partial_reconciliation_siblings_ids = []
            if line.reconcile_partial_id:
                partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
                partial_reconciliation_siblings_ids.remove(line.id)

            ret_line = {
                'id': line.id,
                'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
                'ref': line.move_id.ref,
                'account_code': line.account_id.code,
                'account_name': line.account_id.name,
                'account_type': line.account_id.type,
                'date_maturity': line.date_maturity,
                'date': line.date,
                'period_name': line.period_id.name,
                'journal_name': line.journal_id.name,
                'partner_id': line.partner_id.id,
                'partner_name': line.partner_id.name,
                'is_partially_reconciled': bool(line.reconcile_partial_id),
                'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
            }

            # Amount residual can be negative
            # Normalize so the displayed amount is positive, swapping the
            # debit/credit side accordingly.
            debit = line.debit
            credit = line.credit
            amount = line.amount_residual
            amount_currency = line.amount_residual_currency
            if line.amount_residual < 0:
                debit, credit = credit, debit
                amount = -amount
                amount_currency = -amount_currency

            # Get right debit / credit:
            # Work in the line's own currency first, then convert to the
            # widget's target currency (rate at target_date when given).
            target_currency = target_currency or company_currency
            line_currency = line.currency_id or company_currency
            amount_currency_str = ""
            total_amount_currency_str = ""
            if line_currency != company_currency:
                total_amount = line.amount_currency
                actual_debit = debit > 0 and amount_currency or 0.0
                actual_credit = credit > 0 and amount_currency or 0.0
            else:
                total_amount = abs(debit - credit)
                actual_debit = debit > 0 and amount or 0.0
                actual_credit = credit > 0 and amount or 0.0
            if line_currency != target_currency:
                amount_currency_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=line_currency)
                total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
                ret_line['credit_currency'] = actual_credit
                ret_line['debit_currency'] = actual_debit
                ctx = context.copy()
                if target_date:
                    ctx.update({'date': target_date})
                total_amount = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, total_amount, context=ctx)
                actual_debit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_debit, context=ctx)
                actual_credit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_credit, context=ctx)
            amount_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=target_currency)
            total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)

            ret_line['debit'] = actual_debit
            ret_line['credit'] = actual_credit
            ret_line['amount_str'] = amount_str
            ret_line['total_amount_str'] = total_amount_str
            ret_line['amount_currency_str'] = amount_currency_str
            ret_line['total_amount_currency_str'] = total_amount_currency_str
            ret.append(ret_line)
        return ret
    def list_partners_to_reconcile(self, cr, uid, context=None):
        """Return name_get pairs of partners with something left to reconcile:
        unreconciled valid lines on reconcilable accounts carrying both debit
        and credit, newer than the partner's last reconciliation date."""
        cr.execute(
             """SELECT partner_id FROM (
                SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
                FROM account_move_line l
                RIGHT JOIN account_account a ON (a.id = l.account_id)
                RIGHT JOIN res_partner p ON (l.partner_id = p.id)
                    WHERE a.reconcile IS TRUE
                    AND l.reconcile_id IS NULL
                    AND l.state <> 'draft'
                    GROUP BY l.partner_id, p.last_reconciliation_date
                ) AS s
                WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
                ORDER BY last_reconciliation_date""")
        ids = [x[0] for x in cr.fetchall()]
        if not ids:
            return []

        # To apply the ir_rules
        # (raw SQL bypasses record rules; re-search through the ORM so the
        # current user only sees partners they are allowed to see)
        partner_obj = self.pool.get('res.partner')
        ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
        return partner_obj.name_get(cr, uid, ids, context=context)
    def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
        """Partially reconcile the given lines, merging any existing partial
        reconciliations they belong to.  If the combined total is zero, a
        full reconciliation is performed instead (delegating to reconcile()).
        Returns the reconciliation id."""
        move_rec_obj = self.pool.get('account.move.reconcile')
        merges = []
        unmerge = []
        total = 0.0
        merges_rec = []
        company_list = []
        if context is None:
            context = {}
        # All lines must belong to the same company.
        for line in self.browse(cr, uid, ids, context=context):
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
            company_list.append(line.company_id.id)

        for line in self.browse(cr, uid, ids, context=context):
            if line.account_id.currency_id:
                currency_id = line.account_id.currency_id
            else:
                currency_id = line.company_id.currency_id
            if line.reconcile_id:
                raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
            if line.reconcile_partial_id:
                # Absorb the line's existing partial reconciliation: collect
                # its sibling lines (merges) and remember the old
                # reconciliation record so it can be merged/cleaned up.
                for line2 in line.reconcile_partial_id.line_partial_ids:
                    if line2.state != 'valid':
                        raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
                    if not line2.reconcile_id:
                        if line2.id not in merges:
                            merges.append(line2.id)
                        if line2.account_id.currency_id:
                            total += line2.amount_currency
                        else:
                            total += (line2.debit or 0.0) - (line2.credit or 0.0)
                merges_rec.append(line.reconcile_partial_id.id)
            else:
                unmerge.append(line.id)
                if line.account_id.currency_id:
                    total += line.amount_currency
                else:
                    total += (line.debit or 0.0) - (line.credit or 0.0)
        # Balanced set -> promote to a full reconciliation.
        if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
            res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
            return res
        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
        }, context=reconcile_context)
        move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
        return r_id
    def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
        """Fully reconcile the given move lines.

        All lines must belong to the same reconcilable account and company and
        not be reconciled yet.  If the lines do not balance (in company or
        secondary currency), a write-off move is created on writeoff_acc_id /
        writeoff_period_id / writeoff_journal_id and its counterpart line is
        added to the reconciliation.  Returns the account.move.reconcile id.
        """
        account_obj = self.pool.get('account.account')
        move_obj = self.pool.get('account.move')
        move_rec_obj = self.pool.get('account.move.reconcile')
        partner_obj = self.pool.get('res.partner')
        currency_obj = self.pool.get('res.currency')
        lines = self.browse(cr, uid, ids, context=context)
        unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
        credit = debit = 0.0
        currency = 0.0
        account_id = False
        partner_id = False
        if context is None:
            context = {}
        # All lines must belong to the same company.
        company_list = []
        for line in self.browse(cr, uid, ids, context=context):
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
            company_list.append(line.company_id.id)
        # Accumulate totals; account_id/partner_id end up as those of the
        # last unreconciled line (all lines share the account, checked below).
        for line in unrec_lines:
            if line.state <> 'valid':
                raise osv.except_osv(_('Error!'),
                        _('Entry "%s" is not valid !') % line.name)
            credit += line['credit']
            debit += line['debit']
            currency += line['amount_currency'] or 0.0
            account_id = line['account_id']['id']
            partner_id = (line['partner_id'] and line['partner_id']['id']) or False
        writeoff = debit - credit
        # If date_p in context => take this date
        if context.has_key('date_p') and context['date_p']:
            date=context['date_p']
        else:
            date = time.strftime('%Y-%m-%d')

        cr.execute('SELECT account_id, reconcile_id '\
                   'FROM account_move_line '\
                   'WHERE id IN %s '\
                   'GROUP BY account_id,reconcile_id',
                   (tuple(ids), ))
        r = cr.fetchall()
        #TODO: move this check to a constraint in the account_move_reconcile object
        # Exactly one (account, reconcile) group means: same account, and a
        # uniform reconciliation state across the lines.
        if len(r) != 1:
            raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
        if not unrec_lines:
            raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
        account = account_obj.browse(cr, uid, account_id, context=context)
        if not account.reconcile:
            raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
        if r[0][1] != None:
            raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
        # Unbalanced (in company currency or in the account's secondary
        # currency): generate a write-off move for the difference.
        if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
           (account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
            if not writeoff_acc_id:
                raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
            if writeoff > 0:
                debit = writeoff
                credit = 0.0
                self_credit = writeoff
                self_debit = 0.0
            else:
                debit = 0.0
                credit = -writeoff
                self_credit = 0.0
                self_debit = -writeoff
            # If comment exist in context, take it
            if 'comment' in context and context['comment']:
                libelle = context['comment']
            else:
                libelle = _('Write-Off')

            cur_obj = self.pool.get('res.currency')
            cur_id = False
            amount_currency_writeoff = 0.0
            if context.get('company_currency_id',False) != context.get('currency_id',False):
                cur_id = context.get('currency_id',False)
                for line in unrec_lines:
                    if line.currency_id and line.currency_id.id == context.get('currency_id',False):
                        amount_currency_writeoff += line.amount_currency
                    else:
                        tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
                        amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount

            # Two balancing lines: one on the reconciled account (added to the
            # reconciliation below), one on the write-off account.
            writeoff_lines = [
                (0, 0, {
                    'name': libelle,
                    'debit': self_debit,
                    'credit': self_credit,
                    'account_id': account_id,
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
                }),
                (0, 0, {
                    'name': libelle,
                    'debit': debit,
                    'credit': credit,
                    'account_id': writeoff_acc_id,
                    'analytic_account_id': context.get('analytic_id', False),
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
                })
            ]

            writeoff_move_id = move_obj.create(cr, uid, {
                'period_id': writeoff_period_id,
                'journal_id': writeoff_journal_id,
                'date':date,
                'state': 'draft',
                'line_id': writeoff_lines
            })

            writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
            if account_id == writeoff_acc_id:
                # Both lines hit the same account: keep only the counterpart.
                writeoff_line_ids = [writeoff_line_ids[1]]
            ids += writeoff_line_ids

        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_id': map(lambda x: (4, x, False), ids),
            'line_partial_ids': map(lambda x: (3, x, False), ids)
        }, context=reconcile_context)
        # the id of the move.reconcile is written in the move.line (self) by the create method above
        # because of the way the line_id are defined: (4, x, False)
        for id in ids:
            workflow.trg_trigger(uid, 'account.move.line', id, cr)

        if lines and lines[0]:
            partner_id = lines[0].partner_id and lines[0].partner_id.id or False
            if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
                partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
        return r_id
    def view_header_get(self, cr, user, view_id, view_type, context=None):
        """Build the view header string from the context: the account code
        when account_id is set, otherwise 'journal_code:period_code' when
        both journal and period are known, else False."""
        if context is None:
            context = {}
        context = self.convert_to_period(cr, user, context=context)
        if context.get('account_id', False):
            cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
            res = cr.fetchone()
            if res:
                res = _('Entries: ')+ (res[0] or '')
            return res
        if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
            return False
        # A search-view default journal overrides the plain journal_id.
        if context.get('search_default_journal_id', False):
            context['journal_id'] = context.get('search_default_journal_id')
        cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
        j = cr.fetchone()[0] or ''
        cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
        p = cr.fetchone()[0] or ''
        if j or p:
            return j + (p and (':' + p) or '')
        return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
    def _check_moves(self, cr, uid, context=None):
        """Return the centralisation move of the context's journal/period.

        Fetches the first (oldest) move for the journal/period pair and
        raises when it is already confirmed, since centralised entries must
        stay editable.  Returns the ``(id, state, name)`` row, or ``None``
        when no move exists yet.
        """
        # use the first move ever created for this journal and period
        if context is None:
            context = {}
        cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
        res = cr.fetchone()
        if res:
            if res[1] != 'draft':
                raise osv.except_osv(_('User Error!'),
                        _('The account move (%s) for centralisation ' \
                            'has been confirmed.') % res[2])
        return res
    def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the reconciliations (full and partial) attached to the given
        # move lines, then partially re-reconcile the other lines that were
        # sharing those reconciliations.
        obj_move_line = self.pool.get('account.move.line')
        obj_move_rec = self.pool.get('account.move.reconcile')
        unlink_ids = []
        if not move_ids:
            return True
        recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
        full_recs = filter(lambda x: x['reconcile_id'], recs)
        rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
        part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
        part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
        unlink_ids += rec_ids
        unlink_ids += part_rec_ids
        # Lines outside move_ids that point at a reconciliation being removed:
        # they are re-reconciled among themselves at the end.
        all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
        all_moves = list(set(all_moves) - set(move_ids))
        if unlink_ids:
            if opening_reconciliation:
                raise osv.except_osv(_('Warning!'),
                    _('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
            obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
            obj_move_rec.unlink(cr, uid, unlink_ids)
            if len(all_moves) >= 2:
                obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
        return True
    def unlink(self, cr, uid, ids, context=None, check=True):
        """Delete journal items after verifying they are still editable.

        Collects the parent moves so they can be re-validated once the
        lines are gone (skipped when ``check`` is False).
        """
        if context is None:
            context = {}
        move_obj = self.pool.get('account.move')
        self._update_check(cr, uid, ids, context)
        result = False
        move_ids = set()
        for line in self.browse(cr, uid, ids, context=context):
            move_ids.add(line.move_id.id)
            # Each line's own journal/period drive the open-period checks
            # performed further down the unlink chain.
            context['journal_id'] = line.journal_id.id
            context['period_id'] = line.period_id.id
            result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
        move_ids = list(move_ids)
        if check and move_ids:
            move_obj.validate(cr, uid, move_ids, context=context)
        return result
    def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
        """Write on journal items with accounting-specific safeguards.

        :param check: re-validate the touched moves afterwards
        :param update_check: verify the lines are still modifiable when a
            legal field (account, journal, period, move, debit, credit,
            date) is being changed
        """
        if context is None:
            context={}
        move_obj = self.pool.get('account.move')
        account_obj = self.pool.get('account.account')
        journal_obj = self.pool.get('account.journal')
        if isinstance(ids, (int, long)):
            ids = [ids]
        if vals.get('account_tax_id', False):
            raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
        if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
            raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
        if update_check:
            if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
                self._update_check(cr, uid, ids, context)
        todo_date = None
        if vals.get('date', False):
            # The date is written on the parent move at the end instead, so
            # all sibling lines stay consistent with the move.
            todo_date = vals['date']
            del vals['date']
        for line in self.browse(cr, uid, ids, context=context):
            ctx = context.copy()
            if not ctx.get('journal_id'):
                if line.move_id:
                    ctx['journal_id'] = line.move_id.journal_id.id
                else:
                    ctx['journal_id'] = line.journal_id.id
            if not ctx.get('period_id'):
                if line.move_id:
                    ctx['period_id'] = line.move_id.period_id.id
                else:
                    ctx['period_id'] = line.period_id.id
            #Check for centralisation
            journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
            if journal.centralisation:
                self._check_moves(cr, uid, context=ctx)
        result = super(account_move_line, self).write(cr, uid, ids, vals, context)
        if check:
            done = []
            for line in self.browse(cr, uid, ids):
                if line.move_id.id not in done:
                    done.append(line.move_id.id)
                    move_obj.validate(cr, uid, [line.move_id.id], context)
                    if todo_date:
                        move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
        return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
raise osv.except_osv(_('Error!'), _('You can not add/modify entries in a closed period %s of journal %s.' % (period.name,journal.name)))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
    def _update_check(self, cr, uid, ids, context=None):
        """Refuse modification of posted or reconciled journal items.

        Also verifies, once per (journal, period) pair touched by ``ids``,
        that the pair is still open via :meth:`_update_journal_check`.
        """
        done = {}
        for line in self.browse(cr, uid, ids, context=context):
            err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
            # '<>' is the legacy Python 2 inequality operator (same as !=).
            if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
                raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
            if line.reconcile_id:
                raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
            t = (line.journal_id.id, line.period_id.id)
            if t not in done:
                self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
                done[t] = True
        return True
    def create(self, cr, uid, vals, context=None, check=True):
        """Create a journal item, its move if needed, and its tax lines.

        Resolves journal/period from ``vals``/``context``, enforces the
        journal's account controls, converts amounts into the account's
        secondary currency when applicable, and generates base/tax counter
        lines for ``account_tax_id``.

        :param check: validate (and possibly post) the move afterwards
        """
        account_obj = self.pool.get('account.account')
        tax_obj = self.pool.get('account.tax')
        move_obj = self.pool.get('account.move')
        cur_obj = self.pool.get('res.currency')
        journal_obj = self.pool.get('account.journal')
        context = dict(context or {})
        if vals.get('move_id', False):
            # Inherit company and date from the parent move when not given.
            move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
            if move.company_id:
                vals['company_id'] = move.company_id.id
            if move.date and not vals.get('date'):
                vals['date'] = move.date
        if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
            raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
        if 'journal_id' in vals and vals['journal_id']:
            context['journal_id'] = vals['journal_id']
        if 'period_id' in vals and vals['period_id']:
            context['period_id'] = vals['period_id']
        if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
            m = move_obj.browse(cr, uid, vals['move_id'])
            context['journal_id'] = m.journal_id.id
            context['period_id'] = m.period_id.id
        #we need to treat the case where a value is given in the context for period_id as a string
        if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
            period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
            if len(period_candidate_ids) != 1:
                raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
            context['period_id'] = period_candidate_ids[0][0]
        if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
            context['journal_id'] = context.get('search_default_journal_id')
        self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
        move_id = vals.get('move_id', False)
        journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
        vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
        vals['period_id'] = vals.get('period_id') or context.get('period_id')
        vals['date'] = vals.get('date') or context.get('date')
        if not move_id:
            if journal.centralisation:
                #Check for centralisation
                res = self._check_moves(cr, uid, context)
                if res:
                    # Reuse the journal/period's centralisation move.
                    vals['move_id'] = res[0]
            if not vals.get('move_id', False):
                if journal.sequence_id:
                    #name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
                    v = {
                        'date': vals.get('date', time.strftime('%Y-%m-%d')),
                        'period_id': context['period_id'],
                        'journal_id': context['journal_id']
                    }
                    if vals.get('ref', ''):
                        v.update({'ref': vals['ref']})
                    move_id = move_obj.create(cr, uid, v, context)
                    vals['move_id'] = move_id
                else:
                    raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
        # ok = the account passes the journal's type/account controls
        # (trivially true when the journal defines no controls at all).
        ok = not (journal.type_control_ids or journal.account_control_ids)
        if ('account_id' in vals):
            account = account_obj.browse(cr, uid, vals['account_id'], context=context)
            if journal.type_control_ids:
                type = account.user_type
                for t in journal.type_control_ids:
                    if type.code == t.code:
                        ok = True
                        break
            if journal.account_control_ids and not ok:
                for a in journal.account_control_ids:
                    if a.id == vals['account_id']:
                        ok = True
                        break
            # Automatically convert in the account's secondary currency if there is one and
            # the provided values were not already multi-currency
            if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
                vals['currency_id'] = account.currency_id.id
                ctx = {}
                if 'date' in vals:
                    ctx['date'] = vals['date']
                vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
                    account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
        if not ok:
            raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
        result = super(account_move_line, self).create(cr, uid, vals, context=context)
        # CREATE Taxes
        if vals.get('account_tax_id', False):
            tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
            total = vals['debit'] - vals['credit']
            # Field names flip to their refund counterparts for refund
            # journals, or cash/bank journals with a negative total.
            base_code = 'base_code_id'
            tax_code = 'tax_code_id'
            account_id = 'account_collected_id'
            base_sign = 'base_sign'
            tax_sign = 'tax_sign'
            if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
                base_code = 'ref_base_code_id'
                tax_code = 'ref_tax_code_id'
                account_id = 'account_paid_id'
                base_sign = 'ref_base_sign'
                tax_sign = 'ref_tax_sign'
            tmp_cnt = 0
            for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
                #create the base movement
                if tmp_cnt == 0:
                    # First base amount is written onto the line itself...
                    if tax[base_code]:
                        tmp_cnt += 1
                        if tax_id.price_include:
                            total = tax['price_unit']
                        newvals = {
                            'tax_code_id': tax[base_code],
                            'tax_amount': tax[base_sign] * abs(total),
                        }
                        if tax_id.price_include:
                            if tax['price_unit'] < 0:
                                newvals['credit'] = abs(tax['price_unit'])
                            else:
                                newvals['debit'] = tax['price_unit']
                        self.write(cr, uid, [result], newvals, context=context)
                else:
                    # ...subsequent base amounts become zero-amount lines
                    # carrying only the tax grid information.
                    data = {
                        'move_id': vals['move_id'],
                        'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
                        'date': vals['date'],
                        'partner_id': vals.get('partner_id', False),
                        'ref': vals.get('ref', False),
                        'statement_id': vals.get('statement_id', False),
                        'account_tax_id': False,
                        'tax_code_id': tax[base_code],
                        'tax_amount': tax[base_sign] * abs(total),
                        'account_id': vals['account_id'],
                        'credit': 0.0,
                        'debit': 0.0,
                    }
                    if data['tax_code_id']:
                        self.create(cr, uid, data, context)
                #create the Tax movement
                data = {
                    'move_id': vals['move_id'],
                    'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
                    'date': vals['date'],
                    'partner_id': vals.get('partner_id',False),
                    'ref': vals.get('ref',False),
                    'statement_id': vals.get('statement_id', False),
                    'account_tax_id': False,
                    'tax_code_id': tax[tax_code],
                    'tax_amount': tax[tax_sign] * abs(tax['amount']),
                    'account_id': tax[account_id] or vals['account_id'],
                    'credit': tax['amount']<0 and -tax['amount'] or 0.0,
                    'debit': tax['amount']>0 and tax['amount'] or 0.0,
                }
                if data['tax_code_id']:
                    self.create(cr, uid, data, context)
            del vals['account_tax_id']
        if check and not context.get('novalidate') and (context.get('recompute', True) or journal.entry_posted):
            tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
            if journal.entry_posted and tmp:
                move_obj.button_validate(cr,uid, [vals['move_id']], context)
        return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#Test the FitPara classes
# Smoke-test script for the FitPara fitting code (Python 2); earlier
# PRE/PCS runs are kept below as commented-out reference invocations.
import sys
# NOTE(review): machine-specific path -- the script only works where
# pyParaTools is importable; consider packaging instead of sys.path hacks.
sys.path.append("/home/mscook/Desktop/PhD/Projects/pyParaTools")
from ParaParser import *
from CalcPara import *
from FitPara import *
from ParaUtils import *

print 80*'-'
print " Testing FitPara.py"
print 80*'-'

#python test_FitPara.py pre ~/Desktop/PREfit_python/TESTDATA/PDB/m0.pdb ~/Desktop/PREfit_python/TESTDATA/PRE/EXPERIMENTAL/PREdata_intra_contribution.pre -4.20536 36.52032 -3.35519 13554627736.87463
#pre1 = PREParser(sys.argv)
#pre1.doParse()
#pre_calc = CalcPara()
#pre_calc.PRE(pre1)
#pre_fit = FitPara()
#pre_fit.pre_monomer_fixed_c(pre1)

##python test_FitPara.py pcs STRUCTURES/epsilon.pdb DATASETS/PCS/PCS_epsilon_CNH.npc 1 1 1 30 10 90 90 90
#pcs1 = PCSParser(sys.argv)
#pcs1.doParse()
##print pcs1.getParsed()
#pcs_calcer = CalcPara()
#pcs_calcer.PCSZYZ(pcs1)
#print pcs_1
#pcs_fit = FitPara()
#pcs_fit.pcs_monomer(pcs1)

# RDC fit: argv-style list feeds the parser directly instead of sys.argv.
rdc_in = ['PROTOCOL_NAME', 'rdc', 'STRUCTURES/epsilon.pdb',
    'DATASETS/RDC/epsilon.rdc','-25.392', '39.731', '59.846', '30.591', '240.403']
print '-23.392', '37.731', '57.846', '25.591', '230.403'
rdc = RDCParser(rdc_in)
rdc.doParse()
#rdc_calcer = CalcPara()
#rdc_calcer.RDC(rdc, 'ZYZ')
B0 = rdc.getB0()
temp = rdc.getTemp()
S = rdc.getOrder()
# Gyromagnetic ratios for the 1H-15N pair.
# NOTE(review): lookupMGR('N') is indexed with [1] while lookupMGR('H') is
# not -- confirm the intended return shape of lookupMGR.
g1 = lookupMGR('H')
g2 = lookupMGR('N')[1]
scal = rdcScal(S, g1, g2, B0, temp)
rdc_fitter = FitPara()
rdc_fitter.RDC(rdc, 0, scal)
name: Misc
on: [push, pull_request, merge_group]

concurrency:
  group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}

permissions:
  contents: read

jobs:
  checks:
    name: Miscellaneous checks

    permissions:
      contents: write # for Git to git push

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          token: ${{ (github.repository == 'ruby/ruby' && !startsWith(github.event_name, 'pull')) && secrets.MATZBOT_AUTO_UPDATE_TOKEN || secrets.GITHUB_TOKEN }}

      - uses: ./.github/actions/setup/directories
        with:
          makeup: true
          # Skip overwriting MATZBOT_AUTO_UPDATE_TOKEN
          checkout: '' # false (ref: https://github.com/actions/runner/issues/2238)

      - name: Re-generate Makefiles
        run: |
          # config.status needs to run as a shell script
          { echo ':&&exit'; cat tool/prereq.status; } > config.status
          : # same as actions/setup/directories/action.yml
          for mk in Makefile GNUmakefile; do
            sed -f tool/prereq.status template/$mk.in > $mk
          done

      - name: Check for code styles
        run: |
          set -x
          ruby tool/auto-style.rb "$GITHUB_OLD_SHA" "$GITHUB_NEW_SHA"
        env:
          GITHUB_OLD_SHA: ${{ github.event.pull_request.base.sha }}
          GITHUB_NEW_SHA: ${{ github.event.pull_request.merge_commit_sha }}
        # Skip 'push' events because post_push.yml fixes them on push
        if: ${{ github.repository == 'ruby/ruby' && startsWith(github.event_name, 'pull') }}

      - name: Check for bash specific substitution in configure.ac
        run: |
          git grep -n '\${[A-Za-z_0-9]*/' -- configure.ac && exit 1 || :

      - name: Check for header macros
        run: |
          fail=
          for header in ruby/*.h; do
            git grep -l -F -e $header -e HAVE_`echo $header | tr a-z./ A-Z__` -- . > /dev/null && continue
            fail=1
            echo $header
          done
          exit $fail
        working-directory: include

      - id: now
        run: |
          date +"mon=%-m"%n"day=%-d" >> $GITHUB_OUTPUT
        env:
          # Must be a valid IANA zone name; the previous value "Tokyo/Asia"
          # was invalid, so date silently fell back to UTC and the
          # December-only deprecation gate below used the wrong month.
          TZ: Asia/Tokyo

      - id: deprecation
        run: |
          eval $(sed -n 's/^#define RUBY_API_VERSION_\(MAJOR\|MINOR\) /\1=/p' include/ruby/version.h)
          if git --no-pager grep --color -o 'rb_warn_deprecated_to_remove_at('$MAJOR'\.'$MINOR',.*' -- '*.c' >&2; then
            false
          else
            true
          fi
        continue-on-error: ${{ steps.now.outputs.mon < 12 }}

      - name: Check if to generate documents
        id: rdoc
        run: |
          set -- $(sed 's/#.*//;/^rdoc /!d' gems/bundled_gems)
          { echo version=$2; echo ref=$4; } >> $GITHUB_OUTPUT

      - name: Checkout rdoc
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: ruby/rdoc
          ref: ${{ steps.rdoc.outputs.ref }}
          path: .bundle/gems/rdoc-${{ steps.rdoc.outputs.version }}
        if: ${{ steps.rdoc.outputs.ref != '' }}

      - name: Generate rdoc scripts
        run: |
          set -x
          gempath=$(ruby -e 'print Gem.user_dir, "/bin"')
          PATH=$gempath:$PATH
          gem install --user bundler
          bundle config --local path vendor/bundle
          bundle install --jobs 4
          bundle exec rake generate
        working-directory: .bundle/gems/rdoc-${{ steps.rdoc.outputs.version }}
        if: ${{ steps.rdoc.outputs.ref != '' }}

      - name: Core docs coverage
        run: |
          make XRUBY=ruby RDOC_DEPENDS= RBCONFIG=update-rbconfig rdoc-coverage

      - name: Generate docs
        id: docs
        run: |
          make XRUBY=ruby RDOC_DEPENDS= RBCONFIG=update-rbconfig html
          echo htmlout=ruby-html-${GITHUB_SHA:0:10} >> $GITHUB_OUTPUT
        # Generate only when document commit/PR
        if: >-
          ${{false
          || contains(github.event.head_commit.message, '[ruby/rdoc]')
          || contains(github.event.head_commit.message, '[DOC]')
          || contains(github.event.pull_request.title, '[DOC]')
          || contains(github.event.pull_request.labels.*.name, 'Documentation')
          }}

      - name: Upload docs
        uses: actions/upload-artifact@v6.0.0
        with:
          path: html
          name: ${{ steps.docs.outputs.htmlout }}
        if: ${{ steps.docs.outcome == 'success' }}

      - uses: ./.github/actions/slack
        with:
          SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
        if: ${{ failure() }}
#!/usr/bin/env ruby
# Micro-benchmark: cost of IO#write when handed many small string
# arguments at once (100 calls x 100_000 one-byte chunks), with a reader
# thread draining the pipe so the writer never blocks on a full buffer.
# NOTE(review): Benchmark itself is never used below -- presumably timing
# is done by an external harness; confirm whether the require can go.
require 'benchmark'

i, o = IO.pipe
o.sync = true

# One frozen single-byte string, referenced 100_000 times (no copies).
DOT = ".".freeze
chunks = 100_000.times.collect{DOT}

# Drain the read end in 1KiB chunks until the writer closes its end.
thread = Thread.new do
  while i.read(1024)
  end
end

# Each iteration splats all 100_000 chunks into one multi-argument write.
100.times do
  o.write(*chunks)
end

o.close
thread.join
# populator/helpers/luks.py
# LUKS backend code for populating a DeviceTree.
#
# Copyright (C) 2009-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU Lesser General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY expressed or implied, including the implied
# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks
# that are incorporated in the source code or documentation are not subject
# to the GNU Lesser General Public License and may only be used or
# replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from ... import udev
from ...devices import LUKSDevice
from ...errors import DeviceError, LUKSError
from ...flags import flags
from .devicepopulator import DevicePopulator
from .formatpopulator import FormatPopulator
from ...static_data import luks_data
import logging
log = logging.getLogger("blivet")
class LUKSDevicePopulator(DevicePopulator):
    """Populator for device-mapper LUKS mappings found in udev data."""

    @classmethod
    def match(cls, data):
        """This populator claims udev entries for dm-luks devices."""
        return udev.device_is_dm_luks(data)

    def run(self):
        """Instantiate the mapped LUKS device and register it in the tree."""
        slaves = self._devicetree._add_slave_devices(self.data)
        name = udev.device_get_name(self.data)
        sysfs_path = udev.device_get_sysfs_path(self.data)
        luks_device = LUKSDevice(name,
                                 sysfs_path=sysfs_path,
                                 parents=slaves,
                                 exists=True)
        self._devicetree._add_device(luks_device)
        return luks_device
class LUKSFormatPopulator(FormatPopulator):
    # Higher priority so this wins over generic format populators.
    priority = 100
    _type_specifier = "luks"

    def _get_kwargs(self):
        # Name the format after its mapped device: "luks-<uuid>".
        kwargs = super()._get_kwargs()
        kwargs["name"] = "luks-%s" % udev.device_get_uuid(self.data)
        return kwargs

    def run(self):
        """Resolve a passphrase for this LUKS format and activate its mapping.

        Tries, in order: an already-configured format, a per-device
        passphrase, skip markers, the cleanup/testing shortcut, and
        finally every globally known passphrase.
        """
        super().run()
        if not self.device.format.uuid:
            log.info("luks device %s has no uuid", self.device.path)
            return

        # look up or create the mapped device
        if not self._devicetree.get_device_by_name(self.device.format.map_name):
            passphrase = luks_data.luks_devs.get(self.device.format.uuid)
            if self.device.format.configured:
                pass
            elif passphrase:
                self.device.format.passphrase = passphrase
            elif self.device.format.uuid in luks_data.luks_devs:
                # Key present but falsy: the device was skipped earlier.
                log.info("skipping previously-skipped luks device %s",
                         self.device.name)
            elif self._devicetree._cleanup or flags.testing:
                # if we're only building the devicetree so that we can
                # tear down all of the devices we don't need a passphrase
                if self.device.format.status:
                    # this makes device.configured return True
                    self.device.format.passphrase = 'yabbadabbadoo'
            else:
                # Try each known passphrase. Include luks_data.luks_devs values in case a
                # passphrase has been set for a specific device without a full
                # reset/populate, in which case the new passphrase would not be
                # in luks_data.passphrases.
                passphrases = luks_data.passphrases + list(luks_data.luks_devs.values())
                for passphrase in passphrases:
                    self.device.format.passphrase = passphrase
                    try:
                        self.device.format.setup()
                    except blockdev.BlockDevError:
                        self.device.format.passphrase = None
                    else:
                        break

            luks_device = LUKSDevice(self.device.format.map_name,
                                     parents=[self.device],
                                     exists=True)
            try:
                luks_device.setup()
            except (LUKSError, blockdev.CryptoError, DeviceError) as e:
                log.info("setup of %s failed: %s", self.device.format.map_name, e)
                self.device.remove_child(luks_device)
            else:
                luks_device.update_sysfs_path()
                self._devicetree._add_device(luks_device)
                luks_info = udev.get_device(luks_device.sysfs_path)
                if not luks_info:
                    log.error("failed to get udev data for %s", luks_device.name)
                    return

                # Recurse so whatever lives inside the mapping gets populated.
                self._devicetree.handle_device(luks_info, update_orig_fmt=True)
        else:
            log.warning("luks device %s already in the tree",
                        self.device.format.map_name)
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fake SSLContext implementation."""
try:
import ssl
except ImportError:
pass
class SSLContext(object):
    """A fake SSLContext.

    This implements an API similar to ssl.SSLContext from python 3.2
    but does not implement methods or properties that would be
    incompatible with ssl.wrap_socket from python 2.6.

    You must pass protocol which must be one of the PROTOCOL_* constants
    defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum
    interoperability.
    """

    __slots__ = ('_cafile', '_certfile',
                 '_keyfile', '_protocol', '_verify_mode')

    def __init__(self, protocol):
        self._protocol = protocol
        # No key material and no peer verification until configured.
        self._verify_mode = ssl.CERT_NONE
        self._cafile = None
        self._certfile = None
        self._keyfile = None

    @property
    def protocol(self):
        """The protocol version chosen when constructing the context.

        This attribute is read-only.
        """
        return self._protocol

    @property
    def verify_mode(self):
        """Whether to try to verify other peers' certificates and how to
        behave if verification fails. This attribute must be one of
        ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED.
        """
        return self._verify_mode

    @verify_mode.setter
    def verify_mode(self, value):
        self._verify_mode = value

    def load_cert_chain(self, certfile, keyfile=None):
        """Load a private key and the corresponding certificate. The certfile
        string must be the path to a single file in PEM format containing the
        certificate as well as any number of CA certificates needed to
        establish the certificate's authenticity. The keyfile string, if
        present, must point to a file containing the private key. Otherwise
        the private key will be taken from certfile as well.
        """
        self._certfile = certfile
        self._keyfile = keyfile

    def load_verify_locations(self, cafile=None, dummy=None):
        """Load a set of "certification authority"(CA) certificates used to
        validate other peers' certificates when `~verify_mode` is other than
        ssl.CERT_NONE.
        """
        self._cafile = cafile

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True, dummy=None):
        """Wrap an existing Python socket sock and return an ssl.SSLSocket
        object.
        """
        return ssl.wrap_socket(sock, keyfile=self._keyfile,
                               certfile=self._certfile,
                               server_side=server_side,
                               cert_reqs=self._verify_mode,
                               ssl_version=self._protocol,
                               ca_certs=self._cafile,
                               do_handshake_on_connect=do_handshake_on_connect,
                               suppress_ragged_eofs=suppress_ragged_eofs)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
/**
* Servlet utility methods.
*/
@InterfaceAudience.Private
class ServletUtils {
/**
* Extract a query string parameter without triggering http parameters
* processing by the servlet container.
*
* @param request the request
* @param name the parameter to get the value.
* @return the parameter value, or <code>NULL</code> if the parameter is not
* defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
String queryString = request.getQueryString();
if (queryString == null) {
return null;
}
List<NameValuePair> list = URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java |
import logging
import re
class CPLog(object):
    """Context-aware wrapper around the root ``logging`` logger.

    Prefixes every message with a (right-aligned, 25-char) module context
    and scrubs private data (API keys, passwords, ...) from messages
    unless the application runs in development mode.
    """

    context = ''
    # Query-string parameter names whose values must never reach the logs.
    replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key', 'passkey']

    Env = None
    is_develop = False

    def __init__(self, context = ''):
        # Strip the conventional '.main' suffix so log lines show only the
        # plugin name.
        if context.endswith('.main'):
            context = context[:-5]

        self.context = context
        self.logger = logging.getLogger()

    def setup(self):
        """Lazily bind the application environment (avoids circular imports)."""
        if not self.Env:
            from couchpotato.environment import Env

            self.Env = Env
            self.is_develop = Env.get('dev')

            from couchpotato.core.event import addEvent
            addEvent('app.after_shutdown', self.close)

    def close(self, *args, **kwargs):
        """Flush and close all logging handlers on application shutdown."""
        logging.shutdown()

    def info(self, msg, replace_tuple = ()):
        self.logger.info(self.addContext(msg, replace_tuple))

    def info2(self, msg, replace_tuple = ()):
        # Custom verbosity level between DEBUG (10) and INFO (20).
        self.logger.log(19, self.addContext(msg, replace_tuple))

    def debug(self, msg, replace_tuple = ()):
        self.logger.debug(self.addContext(msg, replace_tuple))

    def error(self, msg, replace_tuple = ()):
        self.logger.error(self.addContext(msg, replace_tuple))

    def warning(self, msg, replace_tuple = ()):
        self.logger.warning(self.addContext(msg, replace_tuple))

    def critical(self, msg, replace_tuple = ()):
        self.logger.critical(self.addContext(msg, replace_tuple), exc_info = 1)

    def addContext(self, msg, replace_tuple = ()):
        """Return *msg* prefixed with the truncated context label."""
        return '[%+25.25s] %s' % (self.context[-25:], self.safeMessage(msg, replace_tuple))

    def safeMessage(self, msg, replace_tuple = ()):
        """Interpolate *replace_tuple* into *msg* and scrub private values."""
        from couchpotato.core.helpers.encoding import ss, toUnicode

        msg = ss(msg)

        try:
            if isinstance(replace_tuple, tuple):
                msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
            elif isinstance(replace_tuple, dict):
                msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
            else:
                msg = msg % ss(replace_tuple)
        except Exception as e:
            self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))

        self.setup()
        if not self.is_develop:

            for replace in self.replace_private:
                msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
                msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

            # Replace api key
            # Best effort only (Env may not be fully configured yet), but a
            # bare 'except:' also swallowed SystemExit/KeyboardInterrupt --
            # narrowed to Exception.
            try:
                api_key = self.Env.setting('api_key')
                if api_key:
                    msg = msg.replace(api_key, 'API_KEY')
            except Exception:
                pass

        return toUnicode(msg)
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the response and request objects.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import pickle
from io import BytesIO
from datetime import datetime
from werkzeug._compat import iteritems
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import wrappers
from werkzeug.exceptions import SecurityError
from werkzeug.wsgi import LimitedStream
from werkzeug.datastructures import MultiDict, ImmutableOrderedMultiDict, \
ImmutableList, ImmutableTypeConversionDict, CharsetAccept, \
MIMEAccept, LanguageAccept, Accept, CombinedMultiDict
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug._compat import implements_iterator, text_type
class RequestTestResponse(wrappers.BaseResponse):
    """Response subclass used by the request tests.

    The body is a pickled dict produced by ``request_demo_app``; it is
    unpickled on construction and exposed through item access.
    """

    def __init__(self, response, status, headers):
        wrappers.BaseResponse.__init__(self, response, status, headers)
        # Decode the pickled payload once, up front.
        self.body_data = pickle.loads(self.get_data())

    def __getitem__(self, key):
        return self.body_data[key]
def request_demo_app(environ, start_response):
    """WSGI app that pickles interesting request attributes into the body."""
    request = wrappers.BaseRequest(environ)
    assert 'werkzeug.request' in environ
    start_response('200 OK', [('Content-Type', 'text/plain')])
    payload = {
        'args': request.args,
        'args_as_list': list(request.args.lists()),
        'form': request.form,
        'form_as_list': list(request.form.lists()),
        'environ': prepare_environ_pickle(request.environ),
        'data': request.get_data()
    }
    return [pickle.dumps(payload)]
def prepare_environ_pickle(environ):
    """Return a copy of *environ* restricted to entries that pickle cleanly."""
    def _roundtrips(item):
        # Probe-pickle the (key, value) pair; anything unpicklable is dropped.
        try:
            pickle.dumps(item)
            return True
        except Exception:
            return False
    return {key: value for key, value in iteritems(environ)
            if _roundtrips((key, value))}
class WrappersTestCase(WerkzeugTestCase):
    """Integration tests for werkzeug's request/response wrappers and mixins.

    Covers the base request/response classes plus the accept, etag,
    user-agent, authorization, stream and common-descriptor mixins.
    """
    def assert_environ(self, environ, method):
        # Sanity checks shared by the request tests below.
        self.assert_strict_equal(environ['REQUEST_METHOD'], method)
        self.assert_strict_equal(environ['PATH_INFO'], '/')
        self.assert_strict_equal(environ['SCRIPT_NAME'], '')
        self.assert_strict_equal(environ['SERVER_NAME'], 'localhost')
        self.assert_strict_equal(environ['wsgi.version'], (1, 0))
        self.assert_strict_equal(environ['wsgi.url_scheme'], 'http')
    def test_base_request(self):
        client = Client(request_demo_app, RequestTestResponse)
        # get requests
        response = client.get('/?foo=bar&foo=hehe')
        self.assert_strict_equal(response['args'], MultiDict([('foo', u'bar'), ('foo', u'hehe')]))
        self.assert_strict_equal(response['args_as_list'], [('foo', [u'bar', u'hehe'])])
        self.assert_strict_equal(response['form'], MultiDict())
        self.assert_strict_equal(response['form_as_list'], [])
        self.assert_strict_equal(response['data'], b'')
        self.assert_environ(response['environ'], 'GET')
        # post requests with form data
        response = client.post('/?blub=blah', data='foo=blub+hehe&blah=42',
                               content_type='application/x-www-form-urlencoded')
        self.assert_strict_equal(response['args'], MultiDict([('blub', u'blah')]))
        self.assert_strict_equal(response['args_as_list'], [('blub', [u'blah'])])
        self.assert_strict_equal(response['form'], MultiDict([('foo', u'blub hehe'), ('blah', u'42')]))
        self.assert_strict_equal(response['data'], b'')
        # currently we do not guarantee that the values are ordered correctly
        # for post data.
        ## self.assert_strict_equal(response['form_as_list'], [('foo', ['blub hehe']), ('blah', ['42'])])
        self.assert_environ(response['environ'], 'POST')
        # patch requests with form data
        response = client.patch('/?blub=blah', data='foo=blub+hehe&blah=42',
                                content_type='application/x-www-form-urlencoded')
        self.assert_strict_equal(response['args'], MultiDict([('blub', u'blah')]))
        self.assert_strict_equal(response['args_as_list'], [('blub', [u'blah'])])
        self.assert_strict_equal(response['form'],
                                 MultiDict([('foo', u'blub hehe'), ('blah', u'42')]))
        self.assert_strict_equal(response['data'], b'')
        self.assert_environ(response['environ'], 'PATCH')
        # post requests with json data
        json = b'{"foo": "bar", "blub": "blah"}'
        response = client.post('/?a=b', data=json, content_type='application/json')
        self.assert_strict_equal(response['data'], json)
        self.assert_strict_equal(response['args'], MultiDict([('a', u'b')]))
        self.assert_strict_equal(response['form'], MultiDict())
    def test_query_string_is_bytes(self):
        req = wrappers.Request.from_values(u'/?foo=%2f')
        self.assert_strict_equal(req.query_string, b'foo=%2f')
    def test_access_route(self):
        req = wrappers.Request.from_values(headers={
            'X-Forwarded-For': '192.168.1.2, 192.168.1.1'
        })
        req.environ['REMOTE_ADDR'] = '192.168.1.3'
        self.assert_equal(req.access_route, ['192.168.1.2', '192.168.1.1'])
        self.assert_strict_equal(req.remote_addr, '192.168.1.3')
        req = wrappers.Request.from_values()
        req.environ['REMOTE_ADDR'] = '192.168.1.3'
        self.assert_strict_equal(list(req.access_route), ['192.168.1.3'])
    def test_url_request_descriptors(self):
        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        self.assert_strict_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?foo=baz')
        self.assert_strict_equal(req.script_root, u'/test')
        self.assert_strict_equal(req.url, u'http://example.com/test/bar?foo=baz')
        self.assert_strict_equal(req.base_url, u'http://example.com/test/bar')
        self.assert_strict_equal(req.url_root, u'http://example.com/test/')
        self.assert_strict_equal(req.host_url, u'http://example.com/')
        self.assert_strict_equal(req.host, 'example.com')
        self.assert_strict_equal(req.scheme, 'http')
        req = wrappers.Request.from_values('/bar?foo=baz', 'https://example.com/test')
        self.assert_strict_equal(req.scheme, 'https')
    def test_url_request_descriptors_query_quoting(self):
        next = 'http%3A%2F%2Fwww.example.com%2F%3Fnext%3D%2F'
        req = wrappers.Request.from_values('/bar?next=' + next, 'http://example.com/')
        self.assert_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?next=' + next)
        self.assert_strict_equal(req.url, u'http://example.com/bar?next=' + next)
    def test_url_request_descriptors_hosts(self):
        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        req.trusted_hosts = ['example.com']
        self.assert_strict_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?foo=baz')
        self.assert_strict_equal(req.script_root, u'/test')
        self.assert_strict_equal(req.url, u'http://example.com/test/bar?foo=baz')
        self.assert_strict_equal(req.base_url, u'http://example.com/test/bar')
        self.assert_strict_equal(req.url_root, u'http://example.com/test/')
        self.assert_strict_equal(req.host_url, u'http://example.com/')
        self.assert_strict_equal(req.host, 'example.com')
        self.assert_strict_equal(req.scheme, 'http')
        req = wrappers.Request.from_values('/bar?foo=baz', 'https://example.com/test')
        self.assert_strict_equal(req.scheme, 'https')
        # a host that is not trusted must raise SecurityError on URL access
        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        req.trusted_hosts = ['example.org']
        self.assert_raises(SecurityError, lambda: req.url)
        self.assert_raises(SecurityError, lambda: req.base_url)
        self.assert_raises(SecurityError, lambda: req.url_root)
        self.assert_raises(SecurityError, lambda: req.host_url)
        self.assert_raises(SecurityError, lambda: req.host)
    def test_authorization_mixin(self):
        request = wrappers.Request.from_values(headers={
            'Authorization': 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
        })
        a = request.authorization
        self.assert_strict_equal(a.type, 'basic')
        self.assert_strict_equal(a.username, 'Aladdin')
        self.assert_strict_equal(a.password, 'open sesame')
    def test_stream_only_mixing(self):
        request = wrappers.PlainRequest.from_values(
            data=b'foo=blub+hehe',
            content_type='application/x-www-form-urlencoded'
        )
        self.assert_equal(list(request.files.items()), [])
        self.assert_equal(list(request.form.items()), [])
        self.assert_raises(AttributeError, lambda: request.data)
        self.assert_strict_equal(request.stream.read(), b'foo=blub+hehe')
    def test_base_response(self):
        # unicode
        response = wrappers.BaseResponse(u'öäü')
        self.assert_strict_equal(response.get_data(), u'öäü'.encode('utf-8'))
        # writing
        response = wrappers.Response('foo')
        response.stream.write('bar')
        self.assert_strict_equal(response.get_data(), b'foobar')
        # set cookie
        response = wrappers.BaseResponse()
        response.set_cookie('foo', 'bar', 60, 0, '/blub', 'example.org')
        self.assert_strict_equal(response.headers.to_wsgi_list(), [
            ('Content-Type', 'text/plain; charset=utf-8'),
            ('Set-Cookie', 'foo=bar; Domain=example.org; Expires=Thu, '
             '01-Jan-1970 00:00:00 GMT; Max-Age=60; Path=/blub')
        ])
        # delete cookie
        response = wrappers.BaseResponse()
        response.delete_cookie('foo')
        self.assert_strict_equal(response.headers.to_wsgi_list(), [
            ('Content-Type', 'text/plain; charset=utf-8'),
            ('Set-Cookie', 'foo=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/')
        ])
        # close call forwarding
        closed = []
        @implements_iterator
        class Iterable(object):
            def __next__(self):
                raise StopIteration()
            def __iter__(self):
                return self
            def close(self):
                closed.append(True)
        response = wrappers.BaseResponse(Iterable())
        response.call_on_close(lambda: closed.append(True))
        app_iter, status, headers = run_wsgi_app(response,
                                                 create_environ(),
                                                 buffered=True)
        self.assert_strict_equal(status, '200 OK')
        self.assert_strict_equal(''.join(app_iter), '')
        self.assert_strict_equal(len(closed), 2)
        # with statement
        del closed[:]
        response = wrappers.BaseResponse(Iterable())
        with response:
            pass
        self.assert_equal(len(closed), 1)
    def test_response_status_codes(self):
        response = wrappers.BaseResponse()
        response.status_code = 404
        self.assert_strict_equal(response.status, '404 NOT FOUND')
        response.status = '200 OK'
        self.assert_strict_equal(response.status_code, 200)
        response.status = '999 WTF'
        self.assert_strict_equal(response.status_code, 999)
        response.status_code = 588
        self.assert_strict_equal(response.status_code, 588)
        self.assert_strict_equal(response.status, '588 UNKNOWN')
        response.status = 'wtf'
        self.assert_strict_equal(response.status_code, 0)
        self.assert_strict_equal(response.status, '0 wtf')
    def test_type_forcing(self):
        def wsgi_application(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/html')])
            return ['Hello World!']
        base_response = wrappers.BaseResponse('Hello World!', content_type='text/html')
        class SpecialResponse(wrappers.Response):
            def foo(self):
                return 42
        # good enough for this simple application, but don't ever use that in
        # real world examples!
        fake_env = {}
        for orig_resp in wsgi_application, base_response:
            response = SpecialResponse.force_type(orig_resp, fake_env)
            assert response.__class__ is SpecialResponse
            self.assert_strict_equal(response.foo(), 42)
            self.assert_strict_equal(response.get_data(), b'Hello World!')
            self.assert_equal(response.content_type, 'text/html')
        # without env, no arbitrary conversion
        self.assert_raises(TypeError, SpecialResponse.force_type, wsgi_application)
    def test_accept_mixin(self):
        request = wrappers.Request({
            'HTTP_ACCEPT': 'text/xml,application/xml,application/xhtml+xml,'
            'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
            'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
            'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
            'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5'
        })
        self.assert_equal(request.accept_mimetypes, MIMEAccept([
            ('text/xml', 1), ('image/png', 1), ('application/xml', 1),
            ('application/xhtml+xml', 1), ('text/html', 0.9),
            ('text/plain', 0.8), ('*/*', 0.5)
        ]))
        self.assert_strict_equal(request.accept_charsets, CharsetAccept([
            ('ISO-8859-1', 1), ('utf-8', 0.7), ('*', 0.7)
        ]))
        self.assert_strict_equal(request.accept_encodings, Accept([
            ('gzip', 1), ('deflate', 1)]))
        self.assert_strict_equal(request.accept_languages, LanguageAccept([
            ('en-us', 1), ('en', 0.5)]))
        request = wrappers.Request({'HTTP_ACCEPT': ''})
        self.assert_strict_equal(request.accept_mimetypes, MIMEAccept())
    def test_etag_request_mixin(self):
        request = wrappers.Request({
            'HTTP_CACHE_CONTROL': 'no-store, no-cache',
            'HTTP_IF_MATCH': 'w/"foo", bar, "baz"',
            'HTTP_IF_NONE_MATCH': 'w/"foo", bar, "baz"',
            'HTTP_IF_MODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT',
            'HTTP_IF_UNMODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT'
        })
        assert request.cache_control.no_store
        assert request.cache_control.no_cache
        for etags in request.if_match, request.if_none_match:
            assert etags('bar')
            assert etags.contains_raw('w/"foo"')
            assert etags.contains_weak('foo')
            assert not etags.contains('foo')
        self.assert_equal(request.if_modified_since, datetime(2008, 1, 22, 11, 18, 44))
        self.assert_equal(request.if_unmodified_since, datetime(2008, 1, 22, 11, 18, 44))
    def test_user_agent_mixin(self):
        user_agents = [
            ('Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.11) '
             'Gecko/20071127 Firefox/2.0.0.11', 'firefox', 'macos', '2.0.0.11',
             'en-US'),
            ('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de-DE) Opera 8.54',
             'opera', 'windows', '8.54', 'de-DE'),
            ('Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 '
             '(KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
             'safari', 'iphone', '419.3', 'en'),
            ('Bot Googlebot/2.1 ( http://www.googlebot.com/bot.html)',
             'google', None, '2.1', None)
        ]
        for ua, browser, platform, version, lang in user_agents:
            request = wrappers.Request({'HTTP_USER_AGENT': ua})
            self.assert_strict_equal(request.user_agent.browser, browser)
            self.assert_strict_equal(request.user_agent.platform, platform)
            self.assert_strict_equal(request.user_agent.version, version)
            self.assert_strict_equal(request.user_agent.language, lang)
            assert bool(request.user_agent)
            self.assert_strict_equal(request.user_agent.to_header(), ua)
            self.assert_strict_equal(str(request.user_agent), ua)
        request = wrappers.Request({'HTTP_USER_AGENT': 'foo'})
        assert not request.user_agent
    def test_stream_wrapping(self):
        class LowercasingStream(object):
            def __init__(self, stream):
                self._stream = stream
            def read(self, size=-1):
                return self._stream.read(size).lower()
            def readline(self, size=-1):
                return self._stream.readline(size).lower()
        data = b'foo=Hello+World'
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        req.stream = LowercasingStream(req.stream)
        self.assert_equal(req.form['foo'], 'hello world')
    def test_data_descriptor_triggers_parsing(self):
        data = b'foo=Hello+World'
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        self.assert_equal(req.data, b'')
        self.assert_equal(req.form['foo'], u'Hello World')
    def test_get_data_method_parsing_caching_behavior(self):
        data = b'foo=Hello+World'
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        # get_data() caches, so form stays available
        self.assert_equal(req.get_data(), data)
        self.assert_equal(req.form['foo'], u'Hello World')
        self.assert_equal(req.get_data(), data)
        # here we access the form data first, caching is bypassed
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        self.assert_equal(req.form['foo'], u'Hello World')
        self.assert_equal(req.get_data(), b'')
        # Another case is uncached get data which trashes everything
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        self.assert_equal(req.get_data(cache=False), data)
        self.assert_equal(req.get_data(cache=False), b'')
        self.assert_equal(req.form, {})
        # Or we can implicitly start the form parser which is similar to
        # the old .data behavior
        req = wrappers.Request.from_values('/', method='POST', data=data,
                                           content_type='application/x-www-form-urlencoded')
        self.assert_equal(req.get_data(parse_form_data=True), b'')
        self.assert_equal(req.form['foo'], u'Hello World')
    def test_etag_response_mixin(self):
        response = wrappers.Response('Hello World')
        self.assert_equal(response.get_etag(), (None, None))
        response.add_etag()
        self.assert_equal(response.get_etag(), ('b10a8db164e0754105b7a99be72e3fe5', False))
        assert not response.cache_control
        response.cache_control.must_revalidate = True
        response.cache_control.max_age = 60
        response.headers['Content-Length'] = len(response.get_data())
        assert response.headers['Cache-Control'] in ('must-revalidate, max-age=60',
                                                     'max-age=60, must-revalidate')
        assert 'date' not in response.headers
        env = create_environ()
        env.update({
            'REQUEST_METHOD': 'GET',
            'HTTP_IF_NONE_MATCH': response.get_etag()[0]
        })
        response.make_conditional(env)
        assert 'date' in response.headers
        # after the thing is invoked by the server as wsgi application
        # (we're emulating this here), there must not be any entity
        # headers left and the status code would have to be 304
        resp = wrappers.Response.from_app(response, env)
        self.assert_equal(resp.status_code, 304)
        assert not 'content-length' in resp.headers
        # make sure date is not overriden
        response = wrappers.Response('Hello World')
        response.date = 1337
        d = response.date
        response.make_conditional(env)
        self.assert_equal(response.date, d)
        # make sure content length is only set if missing
        response = wrappers.Response('Hello World')
        response.content_length = 999
        response.make_conditional(env)
        self.assert_equal(response.content_length, 999)
    def test_etag_response_mixin_freezing(self):
        class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
            pass
        class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
            pass
        response = WithFreeze('Hello World')
        response.freeze()
        self.assert_strict_equal(response.get_etag(),
                                 (text_type(wrappers.generate_etag(b'Hello World')), False))
        response = WithoutFreeze('Hello World')
        response.freeze()
        self.assert_equal(response.get_etag(), (None, None))
        response = wrappers.Response('Hello World')
        response.freeze()
        self.assert_equal(response.get_etag(), (None, None))
    def test_authenticate_mixin(self):
        resp = wrappers.Response()
        resp.www_authenticate.type = 'basic'
        resp.www_authenticate.realm = 'Testing'
        self.assert_strict_equal(resp.headers['WWW-Authenticate'], u'Basic realm="Testing"')
        resp.www_authenticate.realm = None
        resp.www_authenticate.type = None
        assert 'WWW-Authenticate' not in resp.headers
    def test_response_stream_mixin(self):
        response = wrappers.Response()
        response.stream.write('Hello ')
        response.stream.write('World!')
        self.assert_equal(response.response, ['Hello ', 'World!'])
        self.assert_equal(response.get_data(), b'Hello World!')
    def test_common_response_descriptors_mixin(self):
        response = wrappers.Response()
        response.mimetype = 'text/html'
        self.assert_equal(response.mimetype, 'text/html')
        self.assert_equal(response.content_type, 'text/html; charset=utf-8')
        self.assert_equal(response.mimetype_params, {'charset': 'utf-8'})
        response.mimetype_params['x-foo'] = 'yep'
        del response.mimetype_params['charset']
        self.assert_equal(response.content_type, 'text/html; x-foo=yep')
        now = datetime.utcnow().replace(microsecond=0)
        assert response.content_length is None
        response.content_length = '42'
        self.assert_equal(response.content_length, 42)
        for attr in 'date', 'age', 'expires':
            assert getattr(response, attr) is None
            setattr(response, attr, now)
            self.assert_equal(getattr(response, attr), now)
        assert response.retry_after is None
        response.retry_after = now
        self.assert_equal(response.retry_after, now)
        assert not response.vary
        response.vary.add('Cookie')
        response.vary.add('Content-Language')
        assert 'cookie' in response.vary
        self.assert_equal(response.vary.to_header(), 'Cookie, Content-Language')
        response.headers['Vary'] = 'Content-Encoding'
        self.assert_equal(response.vary.as_set(), set(['content-encoding']))
        response.allow.update(['GET', 'POST'])
        self.assert_equal(response.headers['Allow'], 'GET, POST')
        response.content_language.add('en-US')
        response.content_language.add('fr')
        self.assert_equal(response.headers['Content-Language'], 'en-US, fr')
    def test_common_request_descriptors_mixin(self):
        request = wrappers.Request.from_values(content_type='text/html; charset=utf-8',
                                               content_length='23',
                                               headers={
                                                   'Referer': 'http://www.example.com/',
                                                   'Date': 'Sat, 28 Feb 2009 19:04:35 GMT',
                                                   'Max-Forwards': '10',
                                                   'Pragma': 'no-cache',
                                                   'Content-Encoding': 'gzip',
                                                   'Content-MD5': '9a3bc6dbc47a70db25b84c6e5867a072'
                                               })
        self.assert_equal(request.content_type, 'text/html; charset=utf-8')
        self.assert_equal(request.mimetype, 'text/html')
        self.assert_equal(request.mimetype_params, {'charset': 'utf-8'})
        self.assert_equal(request.content_length, 23)
        self.assert_equal(request.referrer, 'http://www.example.com/')
        self.assert_equal(request.date, datetime(2009, 2, 28, 19, 4, 35))
        self.assert_equal(request.max_forwards, 10)
        self.assert_true('no-cache' in request.pragma)
        self.assert_equal(request.content_encoding, 'gzip')
        self.assert_equal(request.content_md5, '9a3bc6dbc47a70db25b84c6e5867a072')
    def test_shallow_mode(self):
        request = wrappers.Request({'QUERY_STRING': 'foo=bar'}, shallow=True)
        self.assert_equal(request.args['foo'], 'bar')
        self.assert_raises(RuntimeError, lambda: request.form['foo'])
    def test_form_parsing_failed(self):
        data = (
            b'--blah\r\n'
        )
        data = wrappers.Request.from_values(input_stream=BytesIO(data),
                                            content_length=len(data),
                                            content_type='multipart/form-data; boundary=foo',
                                            method='POST')
        assert not data.files
        assert not data.form
    def test_file_closing(self):
        data = (b'--foo\r\n'
                b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
                b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
                b'file contents, just the contents\r\n'
                b'--foo--')
        req = wrappers.Request.from_values(
            input_stream=BytesIO(data),
            content_length=len(data),
            content_type='multipart/form-data; boundary=foo',
            method='POST'
        )
        foo = req.files['foo']
        self.assert_equal(foo.mimetype, 'text/plain')
        self.assert_equal(foo.filename, 'foo.txt')
        self.assert_equal(foo.closed, False)
        req.close()
        self.assert_equal(foo.closed, True)
    def test_file_closing_with(self):
        data = (b'--foo\r\n'
                b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
                b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
                b'file contents, just the contents\r\n'
                b'--foo--')
        req = wrappers.Request.from_values(
            input_stream=BytesIO(data),
            content_length=len(data),
            content_type='multipart/form-data; boundary=foo',
            method='POST'
        )
        with req:
            foo = req.files['foo']
            self.assert_equal(foo.mimetype, 'text/plain')
            self.assert_equal(foo.filename, 'foo.txt')
        self.assert_equal(foo.closed, True)
    def test_url_charset_reflection(self):
        req = wrappers.Request.from_values()
        req.charset = 'utf-7'
        self.assert_equal(req.url_charset, 'utf-7')
    def test_response_streamed(self):
        r = wrappers.Response()
        assert not r.is_streamed
        r = wrappers.Response("Hello World")
        assert not r.is_streamed
        r = wrappers.Response(["foo", "bar"])
        assert not r.is_streamed
        def gen():
            if 0:
                yield None
        r = wrappers.Response(gen())
        assert r.is_streamed
    def test_response_iter_wrapping(self):
        def uppercasing(iterator):
            for item in iterator:
                yield item.upper()
        def generator():
            yield 'foo'
            yield 'bar'
        req = wrappers.Request.from_values()
        resp = wrappers.Response(generator())
        del resp.headers['Content-Length']
        resp.response = uppercasing(resp.iter_encoded())
        actual_resp = wrappers.Response.from_app(resp, req.environ, buffered=True)
        self.assertEqual(actual_resp.get_data(), b'FOOBAR')
    def test_response_freeze(self):
        def generate():
            yield "foo"
            yield "bar"
        resp = wrappers.Response(generate())
        resp.freeze()
        self.assert_equal(resp.response, [b'foo', b'bar'])
        self.assert_equal(resp.headers['content-length'], '6')
    def test_other_method_payload(self):
        data = b'Hello World'
        req = wrappers.Request.from_values(input_stream=BytesIO(data),
                                           content_length=len(data),
                                           content_type='text/plain',
                                           method='WHAT_THE_FUCK')
        self.assert_equal(req.get_data(), data)
        self.assert_is_instance(req.stream, LimitedStream)
    def test_urlfication(self):
        resp = wrappers.Response()
        resp.headers['Location'] = u'http://üser:pässword@☃.net/påth'
        resp.headers['Content-Location'] = u'http://☃.net/'
        headers = resp.get_wsgi_headers(create_environ())
        self.assert_equal(headers['location'], \
            'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
        self.assert_equal(headers['content-location'], 'http://xn--n3h.net/')
    def test_new_response_iterator_behavior(self):
        req = wrappers.Request.from_values()
        resp = wrappers.Response(u'Hello Wörld!')
        def get_content_length(resp):
            headers = resp.get_wsgi_headers(req.environ)
            return headers.get('content-length', type=int)
        def generate_items():
            yield "Hello "
            yield u"Wörld!"
        # werkzeug encodes when set to `data` now, which happens
        # if a string is passed to the response object.
        self.assert_equal(resp.response, [u'Hello Wörld!'.encode('utf-8')])
        self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
        self.assert_equal(get_content_length(resp), 13)
        assert not resp.is_streamed
        assert resp.is_sequence
        # try the same for manual assignment
        resp.set_data(u'Wörd')
        self.assert_equal(resp.response, [u'Wörd'.encode('utf-8')])
        self.assert_equal(resp.get_data(), u'Wörd'.encode('utf-8'))
        self.assert_equal(get_content_length(resp), 5)
        assert not resp.is_streamed
        assert resp.is_sequence
        # automatic generator sequence conversion
        resp.response = generate_items()
        assert resp.is_streamed
        assert not resp.is_sequence
        self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
        self.assert_equal(resp.response, [b'Hello ', u'Wörld!'.encode('utf-8')])
        assert not resp.is_streamed
        assert resp.is_sequence
        # automatic generator sequence conversion
        resp.response = generate_items()
        resp.implicit_sequence_conversion = False
        assert resp.is_streamed
        assert not resp.is_sequence
        self.assert_raises(RuntimeError, lambda: resp.get_data())
        resp.make_sequence()
        self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
        self.assert_equal(resp.response, [b'Hello ', u'Wörld!'.encode('utf-8')])
        assert not resp.is_streamed
        assert resp.is_sequence
        # stream makes it a list no matter how the conversion is set
        for val in True, False:
            resp.implicit_sequence_conversion = val
            resp.response = ("foo", "bar")
            assert resp.is_sequence
            resp.stream.write('baz')
            self.assert_equal(resp.response, ['foo', 'bar', 'baz'])
    def test_form_data_ordering(self):
        class MyRequest(wrappers.Request):
            parameter_storage_class = ImmutableOrderedMultiDict
        req = MyRequest.from_values('/?foo=1&bar=0&foo=3')
        self.assert_equal(list(req.args), ['foo', 'bar'])
        self.assert_equal(list(req.args.items(multi=True)), [
            ('foo', '1'),
            ('bar', '0'),
            ('foo', '3')
        ])
        self.assert_is_instance(req.args, ImmutableOrderedMultiDict)
        self.assert_is_instance(req.values, CombinedMultiDict)
        self.assert_equal(req.values['foo'], '1')
        self.assert_equal(req.values.getlist('foo'), ['1', '3'])
    def test_storage_classes(self):
        class MyRequest(wrappers.Request):
            dict_storage_class = dict
            list_storage_class = list
            parameter_storage_class = dict
        req = MyRequest.from_values('/?foo=baz', headers={
            'Cookie': 'foo=bar'
        })
        assert type(req.cookies) is dict
        self.assert_equal(req.cookies, {'foo': 'bar'})
        assert type(req.access_route) is list
        assert type(req.args) is dict
        assert type(req.values) is CombinedMultiDict
        self.assert_equal(req.values['foo'], u'baz')
        req = wrappers.Request.from_values(headers={
            'Cookie': 'foo=bar'
        })
        assert type(req.cookies) is ImmutableTypeConversionDict
        self.assert_equal(req.cookies, {'foo': 'bar'})
        assert type(req.access_route) is ImmutableList
        MyRequest.list_storage_class = tuple
        req = MyRequest.from_values()
        assert type(req.access_route) is tuple
    def test_response_headers_passthrough(self):
        headers = wrappers.Headers()
        resp = wrappers.Response(headers=headers)
        assert resp.headers is headers
    def test_response_304_no_content_length(self):
        resp = wrappers.Response('Test', status=304)
        env = create_environ()
        assert 'content-length' not in resp.get_wsgi_headers(env)
    def test_ranges(self):
        # basic range stuff
        req = wrappers.Request.from_values()
        assert req.range is None
        req = wrappers.Request.from_values(headers={'Range': 'bytes=0-499'})
        self.assert_equal(req.range.ranges, [(0, 500)])
        resp = wrappers.Response()
        resp.content_range = req.range.make_content_range(1000)
        self.assert_equal(resp.content_range.units, 'bytes')
        self.assert_equal(resp.content_range.start, 0)
        self.assert_equal(resp.content_range.stop, 500)
        self.assert_equal(resp.content_range.length, 1000)
        self.assert_equal(resp.headers['Content-Range'], 'bytes 0-499/1000')
        resp.content_range.unset()
        assert 'Content-Range' not in resp.headers
        resp.headers['Content-Range'] = 'bytes 0-499/1000'
        self.assert_equal(resp.content_range.units, 'bytes')
        self.assert_equal(resp.content_range.start, 0)
        self.assert_equal(resp.content_range.stop, 500)
        self.assert_equal(resp.content_range.length, 1000)
    def test_auto_content_length(self):
        resp = wrappers.Response('Hello World!')
        self.assert_equal(resp.content_length, 12)
        resp = wrappers.Response(['Hello World!'])
        assert resp.content_length is None
        self.assert_equal(resp.get_wsgi_headers({})['Content-Length'], '12')
    def test_disabled_auto_content_length(self):
        class MyResponse(wrappers.Response):
            automatically_set_content_length = False
        resp = MyResponse('Hello World!')
        self.assert_is_none(resp.content_length)
        resp = MyResponse(['Hello World!'])
        self.assert_is_none(resp.content_length)
        self.assert_not_in('Content-Length', resp.get_wsgi_headers({}))
    def test_location_header_autocorrect(self):
        env = create_environ()
        class MyResponse(wrappers.Response):
            autocorrect_location_header = False
        resp = MyResponse('Hello World!')
        resp.headers['Location'] = '/test'
        self.assert_equal(resp.get_wsgi_headers(env)['Location'], '/test')
        resp = wrappers.Response('Hello World!')
        resp.headers['Location'] = '/test'
        self.assert_equal(resp.get_wsgi_headers(env)['Location'], 'http://localhost/test')
    def test_modified_url_encoding(self):
        class ModifiedRequest(wrappers.Request):
            url_charset = 'euc-kr'
        req = ModifiedRequest.from_values(u'/?foo=정상처리'.encode('euc-kr'))
        self.assert_strict_equal(req.args['foo'], u'정상처리')
def suite():
    """Assemble and return the test suite for the wrapper tests."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(WrappersTestCase))
    return result
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
const HEADERS = `// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT`
const fs = require('fs')
// Prepend the license header to every generated artifact, in place.
;['index.js', 'index.d.ts'].forEach((file) => {
  const original = fs.readFileSync(file, 'utf8')
  fs.writeFileSync(file, HEADERS + '\n\n' + original, 'utf8')
})
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {
Component,
ElementRef,
effect,
input,
output,
signal,
viewChild,
ChangeDetectionStrategy,
linkedSignal,
computed,
} from '@angular/core';
import {FormsModule} from '@angular/forms';
import {Property} from '../object-tree-types';
// A single editable primitive value.
type EditorType = string | number | boolean;
// An editor may yield a primitive or an array of primitives.
type EditorResult = EditorType | Array<EditorType>;
// Whether the property is currently being displayed or edited.
enum PropertyEditorState {
  Read,
  Write,
}
/**
 * Interpret user input as JSON where possible; otherwise fall back to
 * the stringified raw value.
 */
const parseValue = (value: EditorResult): EditorResult => {
  let parsed: EditorResult;
  try {
    parsed = JSON.parse(value as any) as EditorResult;
  } catch {
    parsed = value.toString();
  }
  return parsed;
};
@Component({
  templateUrl: './property-editor.component.html',
  selector: 'ng-property-editor',
  styleUrls: ['./property-editor.component.scss'],
  imports: [FormsModule],
  changeDetection: ChangeDetectionStrategy.OnPush,
  host: {
    '(click)': 'onClick()',
  },
})
export class PropertyEditorComponent {
  // Reference to the inline <input> used while in the Write state.
  protected readonly inputEl = viewChild<ElementRef<HTMLInputElement>>('inputEl');
  // The object-tree property being displayed/edited.
  protected readonly property = input.required<Property>();
  // The property's current value, as reported by the tree descriptor.
  protected readonly initialValue = computed(() => this.property().descriptor.value);
  // Emits the parsed new value when an edit is accepted.
  protected readonly updateValue = output<EditorResult>();
  readState = PropertyEditorState.Read;
  writeState = PropertyEditorState.Write;
  // Pending edit value; re-seeded from initialValue whenever it changes.
  readonly valueToSubmit = linkedSignal<EditorResult | undefined>(this.initialValue);
  readonly currentPropertyState = signal(this.readState);
  constructor() {
    // Focus the input as soon as the editor switches into Write mode.
    effect(() => {
      const editor = this.inputEl()?.nativeElement;
      if (editor && this.currentPropertyState() === this.writeState) {
        editor.focus();
      }
    });
  }
  /** Commit the pending value: parse it, emit it, and return to Read mode. */
  accept(): void {
    const parsed = parseValue(this.valueToSubmit()!);
    this.updateValue.emit(parsed);
    this.currentPropertyState.set(this.readState);
  }
  /** Discard the pending edit and restore the original value. */
  reject(): void {
    this.valueToSubmit.set(this.initialValue());
    this.currentPropertyState.set(this.readState);
  }
  /** Host click handler: a click in Read mode starts an edit. */
  onClick(): void {
    if (this.currentPropertyState() === this.readState) {
      this.currentPropertyState.set(this.writeState);
    }
  }
  onFocus() {
    // A slight timeout is required for text selection.
    setTimeout(() => {
      this.inputEl()?.nativeElement.select();
    });
  }
  /** Losing focus while editing implicitly accepts the pending value. */
  onBlur(): void {
    if (this.currentPropertyState() === this.writeState) {
      this.accept();
    }
  }
}
from util.ncconv.experimental.ocg_stat import OcgStatFunction
class Section(object):
    """A titled block of report lines.

    Subclasses may set ``_title`` as a class-level default title; an explicit
    ``title`` passed to the constructor takes precedence.
    """
    _title = None

    def __init__(self, lines=None, title=None):
        """
        :param lines: optional initial list of report lines. A fresh list is
            created per instance -- the previous ``lines=[]`` default was a
            shared mutable default, so ``add_line`` leaked lines between
            every Section created without an explicit ``lines`` argument.
        :param title: optional title overriding the class-level ``_title``.
        """
        self.lines = lines if lines is not None else []
        self.title = title or self._title

    @property
    def formatted_title(self):
        # Use the resolved instance title so an explicit ``title`` argument
        # is honored (previously the class-level ``_title`` was read here,
        # silently ignoring the constructor argument).
        msg = '=== {0} ==='.format(self.title)
        return(msg)

    def add_line(self, line):
        """Append a single line to this section."""
        self.lines.append(line)

    def format(self):
        """Return the section as a list of strings: title first, then lines."""
        frmt = [self.formatted_title] + self.lines
        return(frmt)
class RequestSection(Section):
    """Base class for report sections that derive their lines from a request.

    Subclasses override :meth:`get_lines`; the default reports ``(None)``.
    """
    def __init__(self, request, **kwds):
        self.request = request
        super(RequestSection, self).__init__(**kwds)

    def format(self):
        # Title line followed by whatever the subclass extracts.
        return([self.formatted_title] + self.get_lines())

    def get_lines(self):
        # Fallback when a subclass has nothing to report.
        return(['(None)'])
class SectionGeneratedUrl(RequestSection):
    """Reports the absolute URL that produced this output."""
    _title = "Generated URL"

    def get_lines(self):
        url = self.request.build_absolute_uri()
        return([url])
class SectionTemporalRange(RequestSection):
    """Report the inclusive lower/upper bounds of the temporal selection."""
    _title = 'Temporal Range (inclusive)'

    def get_lines(self):
        lower = self.request.ocg.temporal[0]
        upper = self.request.ocg.temporal[1]
        return ['Lower :: {0}'.format(lower),
                'Upper :: {0}'.format(upper)]
class SectionSpatial(RequestSection):
    """Describe the spatial operation and aggregation applied to the AOI."""
    _title = 'Spatial Operations Performed'
    # Keys must match the labels built in get_lines() below.
    _descs = {
        'Intersect':'Grid cells overlapping or sharing a border with the AOI geometry are included.',
        'Clip':'Full geometric intersection of grid cell and AOI geometries.',
        'Aggregate=False':'Geometries are not merged.',
        'Aggregate=True':'Geometries merged and climate variable aggregated using area-weighted mean.'
    }

    def get_lines(self):
        spatial_key = self.request.ocg.operation.title()
        aggregate_key = 'Aggregate={0}'.format(self.request.ocg.aggregate)
        # One "<label> :: <description>" line per operation performed.
        return ['{0} :: {1}'.format(key, self._descs[key])
                for key in (spatial_key, aggregate_key)]
class SectionGrouping(RequestSection):
    """Report the temporal grouping (e.g. "Year-Month") used by the query."""
    _title = 'Temporal Grouping Method'

    def _extract_(self):
        grps = [str(a.title()) for a in self.request.ocg.query.grouping]
        return ['-'.join(grps)]

    def get_lines(self):
        try:
            lines = self._extract_()
        except Exception:
            # Fall back to the base placeholder when the query has no usable
            # grouping. Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            lines = super(SectionGrouping, self).get_lines()
        return lines
class SectionFunction(SectionGrouping):
    """List the temporal statistic functions computed for the request."""
    _title = 'Temporal Statistics Calculated'

    def _extract_(self):
        # Each emitted line is "<NAME> :: on raw|aggregated values :: <description>".
        lines = []
        msg = '{0} :: {1} :: {2}'
        for ii,f in enumerate(self.request.ocg.query.functions):
            ## always add count & ignore it if in the function dictionary
            if 'name' in f and f['name'].lower() == 'count':
                continue
            if ii == 0:
                # NOTE(review): the first *enumerated* function is labeled as the
                # aggregate count regardless of its own name; if functions[0] is
                # the skipped 'count' entry, no COUNT_AGG line is emitted at all.
                # Confirm this interaction is intended.
                name = 'COUNT_AGG'
                desc = 'Count of aggregated values in the series.'
            else:
                name = f['name'].upper()
                desc = f['desc']
            if f['raw']:
                raw = 'on raw values'
            else:
                raw = 'on aggregated values'
            if 'args' in f:
                # Interpolate function arguments into the description template.
                desc = desc.format(*f['args'])
            lines.append(msg.format(name,raw,desc))
            if ii == 0 and any([f['raw'] for f in self.request.ocg.query.functions]):
                # Additionally report a raw-value count when any function
                # operates on raw (pre-aggregation) values.
                lines.append(msg.format('COUNT_RAW',
                                        'on raw values',
                                        'Count of raw values in the series.'))
        return(lines)
class SectionAttributes(RequestSection):
    """Define the non-statistical attributes present in the output."""
    _title = 'Attribute Definitions (non-statistical)'
    _descs = {
        'OCGID':'Unique record identifier (OCG=OpenClimateGIS).',
        'GID':'Unique geometry identifier.',
        'TID':'Unique time identifier.',
        'LEVEL':('Level indicator from "1" to max level. With "1" '
                 'indicating level nearest the terrestrial surface. Level'
                 ' is included for all variables.'),
        'VALUE':"Requested variable's or aggregate statistic's value.",
        'TIME':'Record timestamp with same units as the dataset.',
        'DAY':'Day component of the timestamp.',
        'MONTH':'Month component of the timestamp.',
        'YEAR':'Year component of the timestamp.',
        'AREA_M2':'Area of geometry in square meters using SRID 3005 as area-preserving projection.'
    }

    def get_lines(self, use_stat=False):
        # NOTE: use_stat is accepted for signature compatibility but unused.
        ordered = ['OCGID', 'GID', 'TID', 'TIME', 'LEVEL', 'VALUE', 'DAY', 'MONTH', 'YEAR']
        return ['{0} :: {1}'.format(name, self._descs[name]) for name in ordered]
class SectionLinks(RequestSection):
    """Base for sections that render metadata links from a related queryset."""
    _attr = None          # attribute name on request.ocg holding the queryset
    _filter_field = None  # optional filter passed through to metadata_list

    def get_lines(self):
        related = getattr(self.request.ocg, self._attr)
        # Delegate the actual formatting to the queryset's helper.
        return related.metadata_list(request=self.request,
                                     filter_field=self._filter_field)
# Concrete link sections: each names the attribute on request.ocg whose
# queryset supplies the metadata lines (see SectionLinks.get_lines).
class SectionArchive(SectionLinks):
    _title = 'Climate Data Archive'
    _attr = 'archive'

class SectionScenario(SectionLinks):
    _title = 'Emissions Scenario'
    _attr = 'scenario'

class SectionClimateModel(SectionLinks):
    _title = 'Climate Model'
    _attr = 'climate_model'
    # Extra filter applied when listing metadata for this relation.
    _filter_field = 'model'

class SectionVariable(SectionLinks):
    _title = 'Output Variable'
    _attr = 'variable'

class SectionSimulationOutput(SectionLinks):
    _title = 'Simulation Output'
    _attr = 'simulation_output'
/* This file is generated by scripts/process-messages/index.js. Do not edit! */
export * from '../shared/errors.js';
/**
* The node API `AsyncLocalStorage` is not available, but is required to use async server rendering.
* @returns {never}
*/
export function async_local_storage_unavailable() {
const error = new Error(`async_local_storage_unavailable\nThe node API \`AsyncLocalStorage\` is not available, but is required to use async server rendering.\nhttps://svelte.dev/e/async_local_storage_unavailable`);
error.name = 'Svelte error';
throw error;
}
/**
* Encountered asynchronous work while rendering synchronously.
* @returns {never}
*/
export function await_invalid() {
const error = new Error(`await_invalid\nEncountered asynchronous work while rendering synchronously.\nhttps://svelte.dev/e/await_invalid`);
error.name = 'Svelte error';
throw error;
}
/**
* The `html` property of server render results has been deprecated. Use `body` instead.
* @returns {never}
*/
export function html_deprecated() {
const error = new Error(`html_deprecated\nThe \`html\` property of server render results has been deprecated. Use \`body\` instead.\nhttps://svelte.dev/e/html_deprecated`);
error.name = 'Svelte error';
throw error;
}
/**
 * Attempted to set `hydratable` with key `%key%` twice with different values.
 *
 * %stack%
 * @param {string} key
 * @param {string} stack
 * @returns {never}
 */
export function hydratable_clobbering(key, stack) {
	// The template spans a literal newline so the caller-supplied stack trace
	// appears on its own line between the description and the docs link.
	const error = new Error(`hydratable_clobbering\nAttempted to set \`hydratable\` with key \`${key}\` twice with different values.
${stack}\nhttps://svelte.dev/e/hydratable_clobbering`);
	error.name = 'Svelte error';
	throw error;
}
/**
 * Failed to serialize `hydratable` data for key `%key%`.
 *
 * `hydratable` can serialize anything [`uneval` from `devalue`](https://npmjs.com/package/uneval) can, plus Promises.
 *
 * Cause:
 * %stack%
 * @param {string} key
 * @param {string} stack
 * @returns {never}
 */
export function hydratable_serialization_failed(key, stack) {
	// The template spans literal newlines so the serialization-capability note
	// and the underlying cause each render on their own lines.
	const error = new Error(`hydratable_serialization_failed\nFailed to serialize \`hydratable\` data for key \`${key}\`.
\`hydratable\` can serialize anything [\`uneval\` from \`devalue\`](https://npmjs.com/package/uneval) can, plus Promises.
Cause:
${stack}\nhttps://svelte.dev/e/hydratable_serialization_failed`);
	error.name = 'Svelte error';
	throw error;
}
/**
* `csp.nonce` was set while `csp.hash` was `true`. These options cannot be used simultaneously.
* @returns {never}
*/
export function invalid_csp() {
const error = new Error(`invalid_csp\n\`csp.nonce\` was set while \`csp.hash\` was \`true\`. These options cannot be used simultaneously.\nhttps://svelte.dev/e/invalid_csp`);
error.name = 'Svelte error';
throw error;
}
/**
* `%name%(...)` is not available on the server
* @param {string} name
* @returns {never}
*/
export function lifecycle_function_unavailable(name) {
const error = new Error(`lifecycle_function_unavailable\n\`${name}(...)\` is not available on the server\nhttps://svelte.dev/e/lifecycle_function_unavailable`);
error.name = 'Svelte error';
throw error;
}
/**
* Could not resolve `render` context.
* @returns {never}
*/
export function server_context_required() {
const error = new Error(`server_context_required\nCould not resolve \`render\` context.\nhttps://svelte.dev/e/server_context_required`);
error.name = 'Svelte error';
throw error;
} | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/src/internal/server/errors.js |
//go:build linux
package nftabler
import (
"github.com/moby/moby/v2/daemon/libnetwork/internal/nftables"
)
// mirroredWSL2Workaround adds IPv4 NAT rule if docker's host Linux appears to
// be a guest running under WSL2 in with mirrored mode networking.
// https://learn.microsoft.com/en-us/windows/wsl/networking#mirrored-mode-networking
//
// Without mirrored mode networking, or for a packet sent from Linux, packets
// sent to 127.0.0.1 are processed as outgoing - they hit the nat-OUTPUT chain,
// which does not jump to the nat-DOCKER chain because the rule has an exception
// for "-d 127.0.0.0/8". The default action on the nat-OUTPUT chain is ACCEPT (by
// default), so the packet is delivered to 127.0.0.1 on lo, where docker-proxy
// picks it up and acts as a man-in-the-middle; it receives the packet and
// re-sends it to the container (or acks a SYN and sets up a second TCP
// connection to the container). So, the container sees packets arrive with a
// source address belonging to the network's bridge, and it is able to reply to
// that address.
//
// In WSL2's mirrored networking mode, Linux has a loopback0 device as well as lo
// (which owns 127.0.0.1 as normal). Packets sent to 127.0.0.1 from Windows to a
// server listening on Linux's 127.0.0.1 are delivered via loopback0, and
// processed as packets arriving from outside the Linux host (which they are).
//
// So, these packets hit the nat-PREROUTING chain instead of nat-OUTPUT. It would
// normally be impossible for a packet ->127.0.0.1 to arrive from outside the
// host, so the nat-PREROUTING jump to nat-DOCKER has no exception for it. The
// packet is processed by a per-bridge DNAT rule in that chain, so it is
// delivered directly to the container (not via docker-proxy) with source address
// 127.0.0.1, so the container can't respond.
//
// DNAT is normally skipped by RETURN rules in the nat-DOCKER chain for packets
// arriving from any other bridge network. Similarly, this function adds (or
// removes) a rule to RETURN early for packets delivered via loopback0 with
// destination 127.0.0.0/8.
func mirroredWSL2Workaround(tm *nftables.Modifier) {
tm.Create(nftables.Rule{
Chain: natChain,
Group: initialRuleGroup,
Rule: []string{`iifname "loopback0" ip daddr 127.0.0.0/8 counter return`},
})
} | go | github | https://github.com/moby/moby | daemon/libnetwork/drivers/bridge/internal/nftabler/wsl2.go |
"""
[2016-12-09] Challenge #294 [Hard] Rack management 3
https://www.reddit.com/r/dailyprogrammer/comments/5hcd0x/20161209_challenge_294_hard_rack_management_3/
# Description
Today's challenge is an optimization problem. I'll give you the rules of a game, loosely inspired by solitaire
Scrabble, and you try to get the best score possible. Post your score along with the moves you make. You may also post
or link to the code you used to get the score.
# Game rules
Start with an empty tile rack that can hold 10 letter tiles, and the following row of 100 tiles:
sd?zeioao?mluvepesceinfxt?wyiru?ie?giator?t??nuefje?l?odndrotpewlgoobiinysagacaqski?aeh?rbhaervtnl?m
These are the tiles you can draw from. Your turn consists of the following steps:
1. Draw tiles from the left and right sides of the row and place them on your rack. You cannot draw a tile that's not
on the left or right end of the row, and you cannot rearrange the tiles in the row. Keep drawing until you have 10
tiles on your rack, or the row is empty.
2. Play a word that appears in the [enable1 word
list](https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/dotnetperls-controls/enable1.txt)
using tiles from your rack. Blank tiles (`?`) are wild and can stand in for any single letter. Tiles used are removed
from the game. Unused tiles remain in your rack for the next turn.
Continue like this until you run out of tiles, or you can't play anymore. There's no way to discard or replace tiles in
your rack other than by playing a word. Any unused tiles in your rack or the row at the end of the game are ignored.
# Scoring
Your final score is the total score of all the plays you make.
Your score for each play is given by 1x the value of the first tile in your play, plus 2x the value of the second tile
in your play, and so on. (Same as in [this week's Intermediate
challenge](https://www.reddit.com/r/dailyprogrammer/comments/5h40ml/20161207_challenge_294_intermediate_rack/).)
The value of the letter tiles is [the same as in
Scrabble](https://en.wikipedia.org/wiki/Scrabble_letter_distributions#English). Blanks are worth 0, and the letters `a`
through `z` are worth:
[1,3,3,2,1,4,2,4,1,8,5,1,3,1,1,3,10,1,1,1,1,4,4,8,4,10]
# Output description
Here is a sample valid solution:
6 s?l?mnize solemnize
0 b?have behave
0 ?hirked shirked
5 tra?q tranq
5 ovum ovum
3 escalop escalop
6 antefix antefix
6 s?uiceway sluiceway
5 ??iggery priggery
0 sailing sailing
6 rai?bow rainbow
7 ?e?oof reroof
1 projet projet
2 unt?nded untended
1 o?t oat
Each line in a solution comprises 3 things: the number of tiles you're drawing from the left side of the row, the play
you make (including blanks), and the word you're playing (not showing blanks).
For instance, the first play involves drawing 6 tiles from the left of the row (`sd?zei`), which implies that I also
draw 4 tiles from the right of the row (`nl?m`). My rack then holds `sd?zeinl?m`, from which I play `s?l?mnize` for 121
points, ending my first turn.
The only tile still on my rack at the beginning of my second turn is `d`. I draw 0 tiles from the left, which implies I
draw 9 from the right (`?rbhaervt`), bringing my rack up to 10 tiles: `d?rbhaervt`. From this I play `b?have` for 45
points, leaving me with `drrt` at the start of my third turn. And so on.
The example above scores a total of 839 points. My personal best is 960. Can you do better?
# Verification script
Here is a Python script that verifies and scores a solution, if it's written in the above format.
import sys
from collections import Counter
N = 10 # number of tiles in the rack
words = set(line.strip() for line in open("../Downloads/enable1.txt"))
row = "sd?zeioao?mluvepesceinfxt?wyiru?ie?giator?t??nuefje?l?odndrotpewlgoobiinysagacaqski?aeh?rbhaervtnl?m"
rack = []
score = 0
for line in sys.stdin:
if not line: continue
leftdraws, play, word = line.split()
# Draw from the left
leftdraws = int(leftdraws)
assert leftdraws <= len(row), "Not enough tiles to draw from"
rack += list(row[:leftdraws])
row = row[leftdraws:]
assert len(rack) <= N, "Drew too many tiles"
# Draw remaining from the right
rightdraws = min(len(row), N - len(rack))
if rightdraws:
rack += list(row[-rightdraws:])
row = row[:-rightdraws]
# Check that play is legal
assert not Counter(play) - Counter(rack), "Cannot make given play"
assert len(play) == len(word) and all(a in ("?", b) for a, b in zip(play, word))
assert word in words
# Remove letters from rack
rack = list((Counter(rack) - Counter(play)).elements())
# Add score
tilescores = dict(zip("abcdefghijklmnopqrstuvwxyz?",
[1,3,3,2,1,4,2,4,1,8,5,1,3,1,1,3,10,1,1,1,1,4,4,8,4,10,0]))
score += sum(j * tilescores[char] for j, char in enumerate(play, 1))
print(score)
"""
def main():
    # Stub: a solution to the challenge described in the module docstring
    # was never implemented.
    pass


if __name__ == "__main__":
    main()
<?php
namespace Illuminate\Tests\Cache;
use Exception;
use Illuminate\Cache\FileStore;
use Illuminate\Contracts\Filesystem\FileNotFoundException;
use Illuminate\Filesystem\Filesystem;
use Illuminate\Support\Carbon;
use Illuminate\Support\Str;
use Mockery as m;
use PHPUnit\Framework\TestCase;
/**
 * Unit tests for Illuminate\Cache\FileStore: get/put semantics, expiration,
 * file/directory permissions, increment/decrement, forget and flush.
 */
class CacheFileStoreTest extends TestCase
{
    protected function tearDown(): void
    {
        // Undo any Carbon::setTestNow() frozen clock set by individual tests.
        Carbon::setTestNow(null);

        parent::tearDown();
    }

    public function testNullIsReturnedIfFileDoesntExist()
    {
        $files = $this->mockFilesystem();
        $files->expects($this->once())->method('get')->will($this->throwException(new FileNotFoundException));
        $store = new FileStore($files, __DIR__);
        $value = $store->get('foo');
        $this->assertNull($value);
    }

    public function testPutCreatesMissingDirectories()
    {
        $files = $this->mockFilesystem();
        $hash = sha1('foo');
        $contents = '0000000000';
        $full_dir = __DIR__.'/'.substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->expects($this->once())->method('makeDirectory')->with($this->equalTo($full_dir), $this->equalTo(0777), $this->equalTo(true));
        $files->expects($this->once())->method('put')->with($this->equalTo($full_dir.'/'.$hash))->willReturn(strlen($contents));
        $store = new FileStore($files, __DIR__);
        $result = $store->put('foo', $contents, 0);
        $this->assertTrue($result);
    }

    public function testPutWillConsiderZeroAsEternalTime()
    {
        $files = $this->mockFilesystem();

        $hash = sha1('O--L / key');
        $filePath = __DIR__.'/'.substr($hash, 0, 2).'/'.substr($hash, 2, 2).'/'.$hash;
        $ten9s = '9999999999'; // The "forever" time value.
        $fileContents = $ten9s.serialize('gold');
        $exclusiveLock = true;

        $files->expects($this->once())->method('put')->with(
            $this->equalTo($filePath),
            $this->equalTo($fileContents),
            $this->equalTo($exclusiveLock) // Ensure we do lock the file while putting.
        )->willReturn(strlen($fileContents));

        (new FileStore($files, __DIR__))->put('O--L / key', 'gold', 0);
    }

    public function testPutWillConsiderBigValuesAsEternalTime()
    {
        $files = $this->mockFilesystem();

        $hash = sha1('O--L / key');
        $filePath = __DIR__.'/'.substr($hash, 0, 2).'/'.substr($hash, 2, 2).'/'.$hash;
        $ten9s = '9999999999'; // The "forever" time value.
        $fileContents = $ten9s.serialize('gold');

        $files->expects($this->once())->method('put')->with(
            $this->equalTo($filePath),
            $this->equalTo($fileContents),
        );

        // Any TTL beyond the forever timestamp is clamped to it.
        (new FileStore($files, __DIR__))->put('O--L / key', 'gold', (int) $ten9s + 1);
    }

    public function testExpiredItemsReturnNullAndGetDeleted()
    {
        $files = $this->mockFilesystem();
        $contents = '0000000000';
        $files->expects($this->once())->method('get')->willReturn($contents);
        $store = $this->getMockBuilder(FileStore::class)->onlyMethods(['forget'])->setConstructorArgs([$files, __DIR__])->getMock();
        $store->expects($this->once())->method('forget');
        $value = $store->get('foo');
        $this->assertNull($value);
    }

    public function testValidItemReturnsContents()
    {
        $files = $this->mockFilesystem();
        $contents = '9999999999'.serialize('Hello World');
        $files->expects($this->once())->method('get')->willReturn($contents);
        $store = new FileStore($files, __DIR__);
        $this->assertSame('Hello World', $store->get('foo'));
    }

    public function testStoreItemProperlyStoresValues()
    {
        $files = $this->mockFilesystem();
        $store = $this->getMockBuilder(FileStore::class)->onlyMethods(['expiration'])->setConstructorArgs([$files, __DIR__])->getMock();
        $store->expects($this->once())->method('expiration')->with($this->equalTo(10))->willReturn(1111111111);
        $contents = '1111111111'.serialize('Hello World');
        $hash = sha1('foo');
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->expects($this->once())->method('put')->with($this->equalTo(__DIR__.'/'.$cache_dir.'/'.$hash), $this->equalTo($contents))->willReturn(strlen($contents));
        $result = $store->put('foo', 'Hello World', 10);
        $this->assertTrue($result);
    }

    public function testStoreItemProperlySetsPermissions()
    {
        $files = m::mock(Filesystem::class);
        $files->shouldIgnoreMissing();
        $store = $this->getMockBuilder(FileStore::class)->onlyMethods(['expiration'])->setConstructorArgs([$files, __DIR__, 0644])->getMock();
        $hash = sha1('foo');
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->shouldReceive('put')->withArgs([__DIR__.'/'.$cache_dir.'/'.$hash, m::any(), m::any()])->andReturnUsing(function ($name, $value) {
            return strlen($value);
        });
        // chmod() is queried once per put; only the first (wrong-mode) result
        // should trigger a corrective chmod with the configured permission.
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_dir.'/'.$hash])->andReturnValues(['0600', '0644'])->times(3);
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_dir.'/'.$hash, 0644])->andReturn([true])->once();
        $result = $store->put('foo', 'foo', 10);
        $this->assertTrue($result);
        $result = $store->put('foo', 'bar', 10);
        $this->assertTrue($result);
        $result = $store->put('foo', 'baz', 10);
        $this->assertTrue($result);
    }

    public function testStoreItemDirectoryProperlySetsPermissions()
    {
        $files = m::mock(Filesystem::class);
        $files->shouldIgnoreMissing();
        $store = $this->getMockBuilder(FileStore::class)->onlyMethods(['expiration'])->setConstructorArgs([$files, __DIR__, 0606])->getMock();
        $hash = sha1('foo');
        $cache_parent_dir = substr($hash, 0, 2);
        $cache_dir = $cache_parent_dir.'/'.substr($hash, 2, 2);
        $files->shouldReceive('put')->withArgs([__DIR__.'/'.$cache_dir.'/'.$hash, m::any(), m::any()])->andReturnUsing(function ($name, $value) {
            return strlen($value);
        });
        $files->shouldReceive('exists')->withArgs([__DIR__.'/'.$cache_dir])->andReturn(false)->once();
        $files->shouldReceive('makeDirectory')->withArgs([__DIR__.'/'.$cache_dir, 0777, true, true])->once();
        // Both the parent and leaf cache directories get their mode corrected.
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_parent_dir])->andReturn(['0600'])->once();
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_parent_dir, 0606])->andReturn([true])->once();
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_dir])->andReturn(['0600'])->once();
        $files->shouldReceive('chmod')->withArgs([__DIR__.'/'.$cache_dir, 0606])->andReturn([true])->once();
        $result = $store->put('foo', 'foo', 10);
        $this->assertTrue($result);
    }

    public function testForeversAreStoredWithHighTimestamp()
    {
        $files = $this->mockFilesystem();
        $contents = '9999999999'.serialize('Hello World');
        $hash = sha1('foo');
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->expects($this->once())->method('put')->with($this->equalTo(__DIR__.'/'.$cache_dir.'/'.$hash), $this->equalTo($contents))->willReturn(strlen($contents));
        $store = new FileStore($files, __DIR__);
        $result = $store->forever('foo', 'Hello World', 10);
        $this->assertTrue($result);
    }

    public function testForeversAreNotRemovedOnIncrement()
    {
        $files = $this->mockFilesystem();
        $contents = '9999999999'.serialize('Hello World');
        $store = new FileStore($files, __DIR__);
        $store->forever('foo', 'Hello World');
        $store->increment('foo');
        $files->expects($this->once())->method('get')->willReturn($contents);
        $this->assertSame('Hello World', $store->get('foo'));
    }

    public function testIncrementExpiredKeys()
    {
        Carbon::setTestNow(Carbon::now());
        $filePath = $this->getCachePath('foo');
        $files = $this->mockFilesystem();
        $now = Carbon::now()->getTimestamp();
        // An already-expired entry must be treated as missing, so incrementing
        // by 3 writes a fresh forever-entry containing 3.
        $initialValue = ($now - 10).serialize(77);
        $valueAfterIncrement = '9999999999'.serialize(3);
        $store = new FileStore($files, __DIR__);
        $files->expects($this->once())->method('get')->with($this->equalTo($filePath), $this->equalTo(true))->willReturn($initialValue);
        $files->expects($this->once())->method('put')->with($this->equalTo($filePath), $this->equalTo($valueAfterIncrement));
        // NOTE(review): the return value is unused and nothing is asserted on
        // it; the mock expectations above are the assertions for this test.
        $result = $store->increment('foo', 3);
    }

    public function testIncrementCanAtomicallyJump()
    {
        $filePath = $this->getCachePath('foo');
        $files = $this->mockFilesystem();
        $initialValue = '9999999999'.serialize(1);
        $valueAfterIncrement = '9999999999'.serialize(4);
        $store = new FileStore($files, __DIR__);
        $files->expects($this->once())->method('get')->with($this->equalTo($filePath), $this->equalTo(true))->willReturn($initialValue);
        $files->expects($this->once())->method('put')->with($this->equalTo($filePath), $this->equalTo($valueAfterIncrement));
        $result = $store->increment('foo', 3);
        $this->assertEquals(4, $result);
    }

    public function testDecrementCanAtomicallyJump()
    {
        $filePath = $this->getCachePath('foo');
        $files = $this->mockFilesystem();
        $initialValue = '9999999999'.serialize(2);
        $valueAfterIncrement = '9999999999'.serialize(0);
        $store = new FileStore($files, __DIR__);
        $files->expects($this->once())->method('get')->with($this->equalTo($filePath), $this->equalTo(true))->willReturn($initialValue);
        $files->expects($this->once())->method('put')->with($this->equalTo($filePath), $this->equalTo($valueAfterIncrement));
        $result = $store->decrement('foo', 2);
        $this->assertEquals(0, $result);
    }

    public function testIncrementNonNumericValues()
    {
        $filePath = $this->getCachePath('foo');
        $files = $this->mockFilesystem();
        // A non-numeric stored value is reset: incrementing yields 1 while the
        // original expiration timestamp is preserved.
        $initialValue = '1999999909'.serialize('foo');
        $valueAfterIncrement = '1999999909'.serialize(1);
        $store = new FileStore($files, __DIR__);
        $files->expects($this->once())->method('get')->with($this->equalTo($filePath), $this->equalTo(true))->willReturn($initialValue);
        $files->expects($this->once())->method('put')->with($this->equalTo($filePath), $this->equalTo($valueAfterIncrement));
        $result = $store->increment('foo');
        $this->assertEquals(1, $result);
    }

    public function testIncrementNonExistentKeys()
    {
        $filePath = $this->getCachePath('foo');
        $files = $this->mockFilesystem();
        $valueAfterIncrement = '9999999999'.serialize(1);
        $store = new FileStore($files, __DIR__);
        // simulates a missing item in file store by the exception
        $files->expects($this->once())->method('get')->with($this->equalTo($filePath), $this->equalTo(true))->willThrowException(new Exception);
        $files->expects($this->once())->method('put')->with($this->equalTo($filePath), $this->equalTo($valueAfterIncrement));
        $result = $store->increment('foo');
        $this->assertIsInt($result);
        $this->assertEquals(1, $result);
    }

    public function testIncrementDoesNotExtendCacheLife()
    {
        Carbon::setTestNow(Carbon::now());
        $files = $this->mockFilesystem();
        $expiration = Carbon::now()->addSeconds(50)->getTimestamp();
        $initialValue = $expiration.serialize(1);
        $valueAfterIncrement = $expiration.serialize(2);
        $store = new FileStore($files, __DIR__);
        $files->expects($this->once())->method('get')->willReturn($initialValue);
        $hash = sha1('foo');
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->expects($this->once())->method('put')->with($this->equalTo(__DIR__.'/'.$cache_dir.'/'.$hash), $this->equalTo($valueAfterIncrement));
        $store->increment('foo');
    }

    public function testRemoveDeletesFileDoesntExist()
    {
        $files = $this->mockFilesystem();
        $hash = sha1('foobull');
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);
        $files->expects($this->once())->method('exists')->with($this->equalTo(__DIR__.'/'.$cache_dir.'/'.$hash))->willReturn(false);
        $store = new FileStore($files, __DIR__);
        $store->forget('foobull');
    }

    public function testRemoveDeletesFile()
    {
        // Uses the real filesystem rather than a mock.
        $files = new Filesystem;
        $store = new FileStore($files, __DIR__);
        $store->put('foobar', 'Hello Baby', 10);
        $this->assertFileExists($store->path('foobar'));
        $store->forget('foobar');
        $this->assertFileDoesNotExist($store->path('foobar'));
    }

    public function testFlushCleansDirectory()
    {
        $files = $this->mockFilesystem();
        $files->expects($this->once())->method('isDirectory')->with($this->equalTo(__DIR__))->willReturn(true);
        $files->expects($this->once())->method('directories')->with($this->equalTo(__DIR__))->willReturn(['foo']);
        $files->expects($this->once())->method('deleteDirectory')->with($this->equalTo('foo'))->willReturn(true);

        $store = new FileStore($files, __DIR__);
        $result = $store->flush();
        $this->assertTrue($result, 'Flush failed');
    }

    public function testFlushFailsDirectoryClean()
    {
        $files = $this->mockFilesystem();
        $files->expects($this->once())->method('isDirectory')->with($this->equalTo(__DIR__))->willReturn(true);
        $files->expects($this->once())->method('directories')->with($this->equalTo(__DIR__))->willReturn(['foo']);
        $files->expects($this->once())->method('deleteDirectory')->with($this->equalTo('foo'))->willReturn(false);

        $store = new FileStore($files, __DIR__);
        $result = $store->flush();
        $this->assertFalse($result, 'Flush should not have cleared directories');
    }

    public function testFlushIgnoreNonExistingDirectory()
    {
        $files = $this->mockFilesystem();
        $files->expects($this->once())->method('isDirectory')->with($this->equalTo(__DIR__.'--wrong'))->willReturn(false);

        $store = new FileStore($files, __DIR__.'--wrong');
        $result = $store->flush();
        $this->assertFalse($result, 'Flush should not clean directory');
    }

    public function testItHandlesForgettingNonFlexibleKeys()
    {
        $store = new FileStore(new Filesystem, __DIR__);
        $key = Str::random();
        $path = $store->path($key);
        $flexiblePath = "illuminate:cache:flexible:created:{$key}";

        $store->put($key, 'value', 5);

        $this->assertFileExists($path);
        $this->assertFileDoesNotExist($flexiblePath);

        $store->forget($key);

        $this->assertFileDoesNotExist($path);
        $this->assertFileDoesNotExist($flexiblePath);
    }

    /**
     * NOTE(review): this method is missing the `test` prefix (and has no
     * @test annotation or #[Test] attribute), so PHPUnit never executes it.
     * Before enabling it, the final assertions need review: $store->put()
     * writes the file at $path, so `assertFileDoesNotExist($path)` after the
     * put looks inconsistent with the real FileStore behavior — confirm the
     * intended semantics before renaming this to testItOnlyForgets...
     */
    public function itOnlyForgetsFlexibleKeysIfParentIsForgotten()
    {
        $store = new FileStore(new Filesystem, __DIR__);
        $key = Str::random();
        $path = $store->path($key);
        $flexiblePath = "illuminate:cache:flexible:created:{$key}";

        touch($flexiblePath);

        $this->assertFileDoesNotExist($path);
        $this->assertFileExists($flexiblePath);

        $store->forget($key);

        $this->assertFileDoesNotExist($path);
        $this->assertFileExists($flexiblePath);

        $store->put($key, 'value', 5);

        $this->assertFileDoesNotExist($path);
        $this->assertFileDoesNotExist($flexiblePath);
    }

    // Returns a PHPUnit mock of the Filesystem contract used by FileStore.
    protected function mockFilesystem()
    {
        return $this->createMock(Filesystem::class);
    }

    // Mirrors FileStore's sharded path scheme: <dir>/<h[0:2]>/<h[2:4]>/<sha1>.
    protected function getCachePath($key)
    {
        $hash = sha1($key);
        $cache_dir = substr($hash, 0, 2).'/'.substr($hash, 2, 2);

        return __DIR__.'/'.$cache_dir.'/'.$hash;
    }
}
# script to convert the newly generated Relative Humidity
def convert_to_hur( tas_arr, vap_arr ):
    """Convert vapor pressure to relative humidity (%).

    tas_arr : air temperature in degrees C (numpy array or scalar)
    vap_arr : vapor pressure in hPa (same shape as tas_arr)
    """
    import numpy as np
    with np.errstate( over='ignore' ):
        # saturation vapor pressure (hPa), Magnus-form approximation over water
        saturation = 6.112 * np.exp( 17.62 * tas_arr / (243.12 + tas_arr) )
        return vap_arr / saturation * 100
def convert_to_vap( tas_arr, hur_arr ):
    """Convert relative humidity (%) to vapor pressure (hPa).

    tas_arr : air temperature in degrees C (numpy array or scalar)
    hur_arr : relative humidity in percent (same shape as tas_arr)
    """
    import numpy as np
    with np.errstate( over='ignore' ):
        # saturation vapor pressure (hPa), Magnus-form approximation over water
        saturation = 6.112 * np.exp( 17.62 * tas_arr / (243.12 + tas_arr) )
        return (hur_arr * saturation) / 100
def run( x ):
    """Compute vapor pressure from a (tas, hur) GeoTIFF pair and write it out.

    Args:
        x: 2-tuple of file paths (tas_path, hur_path); tas in degrees C,
           hur in percent relative humidity.

    Returns:
        The output filename written (a 'vap' GeoTIFF in hPa).
    """
    tas = rasterio.open( x[0] )
    hur = rasterio.open( x[1] )
    meta = tas.meta
    meta[ 'dtype' ] = 'float32' # set it to float32
    meta.update( compress='lzw' )
    meta.pop( 'transform' )
    tas_arr = tas.read( 1 )
    hur_arr = hur.read( 1 )
    vap_arr = convert_to_vap( tas_arr, hur_arr )
    # mask it: nodata cells in tas become nodata in the output
    mask = tas.read_masks( 1 )
    vap_arr[ mask == 0 ] = tas.nodata
    # build an output filename from the input hur path -- changed to deal with pathing!
    output_filename = x[1].replace( 'hur', 'vap' )
    output_filename = output_filename.replace( '_metric_', '_hPa_' )
    dirname = os.path.dirname( output_filename )
    try:
        if not os.path.exists( dirname ):
            os.makedirs( dirname )
    except OSError:
        # narrowed from a bare `except:`: only swallow the race where another
        # parallel worker created the directory between the check and makedirs
        pass
    with rasterio.open( output_filename, 'w', **meta ) as out:
        out.write( vap_arr.astype( np.float32 ), 1 )
    return output_filename
if __name__ == '__main__':
    # import modules
    import os, glob, rasterio
    import numpy as np
    from pathos import multiprocessing as mp

    # args
    ncores = 40
    tas_input_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/ar5'
    hur_input_path = '/Data/malindgren/cru_november_final/ar5'
    models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]

    for model in models:
        # print() works on both Python 2 and 3; the original bare
        # `print model` statement is a SyntaxError under Python 3.
        print( model )
        tas_files = sorted( glob.glob( os.path.join( tas_input_path, model, 'tas', 'downscaled', '*.tif' ) ) )
        hur_files = sorted( glob.glob( os.path.join( hur_input_path, model, 'hur', 'downscaled', '*.tif' ) ) )
        # combine the sorted lists which should now be in a common order...
        tas_hur_list = zip( tas_files, hur_files )
        # run in parallel
        pool = mp.Pool( processes=ncores )
        out = pool.map( run, tas_hur_list )
        pool.close()
# def return_files( input_path, var ):
# output_files = []
# for root, subs, files in os.walk( input_path ):
# # print root
# if root.endswith( 'downscaled' ) and len( files ) != 0 and var in root:
# pool = mp.Pool( processes=ncores )
# files = pool.map( lambda x: os.path.join( root, x ), files )
# pool.close()
# output_files.append( files )
# return output_files | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add AssessmentTemplate
Revision ID: 1894405f14ef
Revises: 1e2abee7566c
Create Date: 2016-02-25 11:49:25.128231
"""
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '1894405f14ef'
down_revision = '1e2abee7566c'
def upgrade():
"""Upgrade database schema to a new revision."""
op.create_table(
'assessment_templates',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column(
'template_object_type', sa.String(length=250), nullable=True),
sa.Column('test_plan_procedure', sa.Boolean(), nullable=False),
sa.Column('procedure_description', sa.Text(), nullable=True),
sa.Column('default_people', sa.Text(), nullable=False),
sa.Column('created_at', sa.DateTime()),
sa.Column('modified_by_id', sa.Integer()),
sa.Column('updated_at', sa.DateTime()),
sa.Column(
'context_id', sa.Integer(), sa.ForeignKey('contexts.id')),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
  """Downgrade the database schema to the previous revision."""
  # Irreversible: any assessment template rows are discarded with the table.
  op.drop_table('assessment_templates')
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.strictness;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.when;
import org.assertj.core.api.ThrowableAssert;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.exceptions.misusing.PotentialStubbingProblem;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
import org.mockito.quality.Strictness;
import org.mockitousage.IMethods;
/**
 * Verifies how {@code Strictness.STRICT_STUBS} configured via the JUnit rule
 * interacts with per-stubbing {@code lenient()} overrides.
 */
public class StrictnessWithRulesTest {

    @Mock IMethods mock;
    // STRICT_STUBS makes unused stubbings and argument mismatches fail the test.
    @Rule public MockitoRule rule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);

    @Test
    public void potential_stubbing_problem() {
        // when
        when(mock.simpleMethod("1")).thenReturn("1");
        lenient().when(mock.differentMethod("2")).thenReturn("2");

        // then on lenient stubbing, we can call it with different argument:
        mock.differentMethod("200");

        // but on strict stubbing, we cannot:
        assertThatThrownBy(
                new ThrowableAssert.ThrowingCallable() {
                    public void call() {
                        // Called from production-like code so the mismatch is
                        // reported as PotentialStubbingProblem, not as a misuse.
                        ProductionCode.simpleMethod(mock, "100");
                    }
                })
                .isInstanceOf(PotentialStubbingProblem.class);

        // let's use the strict stubbing so that it is not reported as failure by the rule:
        mock.simpleMethod("1");
    }

    @Test
    public void unnecessary_stubbing() {
        // this unnecessary stubbing is not flagged by the rule:
        lenient().when(mock.differentMethod("2")).thenReturn("2");
    }
}
import os
def needs_notifying(size_trigger, size_available):
    """Return True when free space has dropped to or below the trigger.

    Args:
        size_trigger: minimum amount of free space in GB
        size_available: currently available free space in bytes
    """
    trigger_bytes = size_trigger * 1024 * 1024 * 1024
    return size_available <= trigger_bytes
def format_lowdisk_message(messages, hostname):
    """Build the alert body listing every low-disk volume on *hostname*."""
    volume_lines = '\n'.join(messages)
    return '[{}] Low disk space on the following volumes:\n{}'.format(
        hostname, volume_lines)
def check_diskspace(settings, hostname):
    """Check the configured filesystems of *hostname* for low disk space.

    Returns an (subject, body) email tuple when at least one volume is low
    or the host has no configuration, and None when everything has room.
    """
    try:
        watched = settings.FILESYSTEMS[hostname]
    except KeyError:
        return ('Error during diskspace check',
                '[spacealarm] No filesystem configuration found for {}'.format(hostname))

    warnings = []
    for fs in watched:
        mount_point, trigger_gb = fs[0], fs[1]
        stats = os.statvfs(mount_point)
        # f_frsize * f_bavail = bytes available to unprivileged users
        # (excludes the root-reserved blocks counted by f_bfree).
        available = stats.f_frsize * stats.f_bavail
        if needs_notifying(trigger_gb, available):
            warnings.append('{}: {:.1f}MB free'.format(
                mount_point, available / 1024 / 1024))
    if warnings:
        return ('Diskspace warning', format_lowdisk_message(warnings, hostname))
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements API tests across platforms and will never have a build
// tag.
package net
import (
"internal/testenv"
"os"
"runtime"
"testing"
"time"
)
// The full stack test cases for IPConn have been moved to the
// following:
// golang.org/x/net/ipv4
// golang.org/x/net/ipv6
// golang.org/x/net/icmp
// TestTCPListenerSpecificMethods exercises the TCPListener-specific API
// surface (Addr, deadlines, Accept/AcceptTCP, File) on an ephemeral
// 127.0.0.1 listener.
func TestTCPListenerSpecificMethods(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	la, err := ResolveTCPAddr("tcp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	ln, err := ListenTCP("tcp4", la)
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	ln.Addr()
	mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond)

	// With a nanosecond deadline both accept variants are expected to
	// time out; any non-timeout error is a real failure.
	if c, err := ln.Accept(); err != nil {
		if !err.(Error).Timeout() {
			t.Fatal(err)
		}
	} else {
		c.Close()
	}
	if c, err := ln.AcceptTCP(); err != nil {
		if !err.(Error).Timeout() {
			t.Fatal(err)
		}
	} else {
		c.Close()
	}

	// File duplication may be unsupported on some platforms; condFatalf
	// downgrades the failure there.
	if f, err := ln.File(); err != nil {
		condFatalf(t, "file+net", "%v", err)
	} else {
		f.Close()
	}
}
// TestTCPConnSpecificMethods dials a local transponder server and smoke-tests
// every TCPConn-specific option setter plus a write/read round trip.
func TestTCPConnSpecificMethods(t *testing.T) {
	la, err := ResolveTCPAddr("tcp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	ln, err := ListenTCP("tcp4", la)
	if err != nil {
		t.Fatal(err)
	}
	ch := make(chan error, 1)
	handler := func(ls *localServer, ln Listener) { ls.transponder(ls.Listener, ch) }
	ls := (&streamListener{Listener: ln}).newLocalServer()
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	ra, err := ResolveTCPAddr("tcp4", ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	c, err := DialTCP("tcp4", nil, ra)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	// Smoke-test the TCP-specific setters; return values are deliberately
	// ignored, only panics/compile coverage matter here.
	c.SetKeepAlive(false)
	c.SetKeepAlivePeriod(3 * time.Second)
	c.SetLinger(0)
	c.SetNoDelay(false)
	c.LocalAddr()
	c.RemoteAddr()
	c.SetDeadline(time.Now().Add(someTimeout))
	c.SetReadDeadline(time.Now().Add(someTimeout))
	c.SetWriteDeadline(time.Now().Add(someTimeout))

	if _, err := c.Write([]byte("TCPCONN TEST")); err != nil {
		t.Fatal(err)
	}
	rb := make([]byte, 128)
	if _, err := c.Read(rb); err != nil {
		t.Fatal(err)
	}

	// Drain any errors reported by the transponder goroutine.
	for err := range ch {
		t.Error(err)
	}
}
// TestUDPConnSpecificMethods smoke-tests the UDPConn-specific API, including
// loopback WriteTo/ReadFrom and the message-oriented variants, and checks
// that writes to a nil address fail without panicking.
func TestUDPConnSpecificMethods(t *testing.T) {
	la, err := ResolveUDPAddr("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	c, err := ListenUDP("udp4", la)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	c.LocalAddr()
	c.RemoteAddr()
	c.SetDeadline(time.Now().Add(someTimeout))
	c.SetReadDeadline(time.Now().Add(someTimeout))
	c.SetWriteDeadline(time.Now().Add(someTimeout))
	c.SetReadBuffer(2048)
	c.SetWriteBuffer(2048)

	wb := []byte("UDPCONN TEST")
	rb := make([]byte, 128)
	// Send to our own local address so the read below is satisfied.
	if _, err := c.WriteToUDP(wb, c.LocalAddr().(*UDPAddr)); err != nil {
		t.Fatal(err)
	}
	if _, _, err := c.ReadFromUDP(rb); err != nil {
		t.Fatal(err)
	}
	if _, _, err := c.WriteMsgUDP(wb, nil, c.LocalAddr().(*UDPAddr)); err != nil {
		condFatalf(t, c.LocalAddr().Network(), "%v", err)
	}
	if _, _, _, _, err := c.ReadMsgUDP(rb, nil); err != nil {
		condFatalf(t, c.LocalAddr().Network(), "%v", err)
	}

	if f, err := c.File(); err != nil {
		condFatalf(t, "file+net", "%v", err)
	} else {
		f.Close()
	}

	// The nil-address writes below must return an error, not panic.
	defer func() {
		if p := recover(); p != nil {
			t.Fatalf("panicked: %v", p)
		}
	}()

	c.WriteToUDP(wb, nil)
	c.WriteMsgUDP(wb, nil, nil)
}
// TestIPConnSpecificMethods smoke-tests the IPConn-specific API over a raw
// ICMP socket, skipping when raw sockets are unavailable (containers,
// non-root), and checks nil-address writes do not panic.
func TestIPConnSpecificMethods(t *testing.T) {
	if !testableNetwork("ip4") {
		t.Skip("skipping: ip4 not supported")
	}

	la, err := ResolveIPAddr("ip4", "127.0.0.1")
	if err != nil {
		t.Fatal(err)
	}
	c, err := ListenIP("ip4:icmp", la)
	if testenv.SyscallIsNotSupported(err) {
		// May be inside a container that disallows creating a socket or
		// not running as root.
		t.Skipf("skipping: %v", err)
	} else if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	c.LocalAddr()
	c.RemoteAddr()
	c.SetDeadline(time.Now().Add(someTimeout))
	c.SetReadDeadline(time.Now().Add(someTimeout))
	c.SetWriteDeadline(time.Now().Add(someTimeout))
	c.SetReadBuffer(2048)
	c.SetWriteBuffer(2048)

	if f, err := c.File(); err != nil {
		condFatalf(t, "file+net", "%v", err)
	} else {
		f.Close()
	}

	// The nil-address writes below must return an error, not panic.
	defer func() {
		if p := recover(); p != nil {
			t.Fatalf("panicked: %v", p)
		}
	}()

	wb := []byte("IPCONN TEST")
	c.WriteToIP(wb, nil)
	c.WriteMsgIP(wb, nil, nil)
}
// TestUnixListenerSpecificMethods exercises the UnixListener-specific API
// (Addr, deadlines, Accept/AcceptUnix, File) on a temporary socket path.
func TestUnixListenerSpecificMethods(t *testing.T) {
	if !testableNetwork("unix") {
		t.Skip("unix test")
	}

	addr := testUnixAddr(t)
	la, err := ResolveUnixAddr("unix", addr)
	if err != nil {
		t.Fatal(err)
	}
	ln, err := ListenUnix("unix", la)
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	// The socket file is created on disk and must be removed afterwards.
	defer os.Remove(addr)
	ln.Addr()
	mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond)

	// With a nanosecond deadline both accept variants are expected to
	// time out; any non-timeout error is a real failure.
	if c, err := ln.Accept(); err != nil {
		if !err.(Error).Timeout() {
			t.Fatal(err)
		}
	} else {
		c.Close()
	}
	if c, err := ln.AcceptUnix(); err != nil {
		if !err.(Error).Timeout() {
			t.Fatal(err)
		}
	} else {
		c.Close()
	}

	if f, err := ln.File(); err != nil {
		condFatalf(t, "file+net", "%v", err)
	} else {
		f.Close()
	}
}
// TestUnixConnSpecificMethods wires up three unixgram sockets (two dialed,
// one listening) and exercises the UnixConn-specific datagram API between
// them, finishing with nil-address writes that must not panic.
func TestUnixConnSpecificMethods(t *testing.T) {
	if !testableNetwork("unixgram") {
		t.Skip("unixgram test")
	}

	addr1, addr2, addr3 := testUnixAddr(t), testUnixAddr(t), testUnixAddr(t)

	a1, err := ResolveUnixAddr("unixgram", addr1)
	if err != nil {
		t.Fatal(err)
	}
	c1, err := DialUnix("unixgram", a1, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer c1.Close()
	defer os.Remove(addr1)
	c1.LocalAddr()
	c1.RemoteAddr()
	c1.SetDeadline(time.Now().Add(someTimeout))
	c1.SetReadDeadline(time.Now().Add(someTimeout))
	c1.SetWriteDeadline(time.Now().Add(someTimeout))
	c1.SetReadBuffer(2048)
	c1.SetWriteBuffer(2048)

	a2, err := ResolveUnixAddr("unixgram", addr2)
	if err != nil {
		t.Fatal(err)
	}
	c2, err := DialUnix("unixgram", a2, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer c2.Close()
	defer os.Remove(addr2)
	c2.LocalAddr()
	c2.RemoteAddr()
	c2.SetDeadline(time.Now().Add(someTimeout))
	c2.SetReadDeadline(time.Now().Add(someTimeout))
	c2.SetWriteDeadline(time.Now().Add(someTimeout))
	c2.SetReadBuffer(2048)
	c2.SetWriteBuffer(2048)

	a3, err := ResolveUnixAddr("unixgram", addr3)
	if err != nil {
		t.Fatal(err)
	}
	c3, err := ListenUnixgram("unixgram", a3)
	if err != nil {
		t.Fatal(err)
	}
	defer c3.Close()
	defer os.Remove(addr3)
	c3.LocalAddr()
	c3.RemoteAddr()
	c3.SetDeadline(time.Now().Add(someTimeout))
	c3.SetReadDeadline(time.Now().Add(someTimeout))
	c3.SetWriteDeadline(time.Now().Add(someTimeout))
	c3.SetReadBuffer(2048)
	c3.SetWriteBuffer(2048)

	wb := []byte("UNIXCONN TEST")
	rb1 := make([]byte, 128)
	rb2 := make([]byte, 128)
	rb3 := make([]byte, 128)
	// c1 -> c2 via the message-oriented API.
	if _, _, err := c1.WriteMsgUnix(wb, nil, a2); err != nil {
		t.Fatal(err)
	}
	if _, _, _, _, err := c2.ReadMsgUnix(rb2, nil); err != nil {
		t.Fatal(err)
	}
	// c2 -> c1, c3 -> c1, c2 -> c3 via the datagram API.
	if _, err := c2.WriteToUnix(wb, a1); err != nil {
		t.Fatal(err)
	}
	if _, _, err := c1.ReadFromUnix(rb1); err != nil {
		t.Fatal(err)
	}
	if _, err := c3.WriteToUnix(wb, a1); err != nil {
		t.Fatal(err)
	}
	if _, _, err := c1.ReadFromUnix(rb1); err != nil {
		t.Fatal(err)
	}
	if _, err := c2.WriteToUnix(wb, a3); err != nil {
		t.Fatal(err)
	}
	if _, _, err := c3.ReadFromUnix(rb3); err != nil {
		t.Fatal(err)
	}

	if f, err := c1.File(); err != nil {
		condFatalf(t, "file+net", "%v", err)
	} else {
		f.Close()
	}

	// The nil-address writes below must return an error, not panic.
	defer func() {
		if p := recover(); p != nil {
			t.Fatalf("panicked: %v", p)
		}
	}()

	c1.WriteToUnix(wb, nil)
	c1.WriteMsgUnix(wb, nil, nil)
	c3.WriteToUnix(wb, nil)
	c3.WriteMsgUnix(wb, nil, nil)
}
{
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "San Francisco",
"ST": "California",
"C": "USA"
}
],
"CN": "ca",
"ca": {
"expiry": "87600h"
}
} | json | github | https://github.com/etcd-io/etcd | pkg/proxy/fixtures/ca-csr.json |
'''
Copyright 2010-2013 DIMA Research Group, TU Berlin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Dec 15, 2011
@author: Alexander Alexandrov <alexander.alexandrov@tu-berlin.de>
'''
class AbstractVisitor(object):
    '''Base class for visitors over node trees.

    Dispatch is name-based: for a node of class ``C``, :meth:`preVisit`
    looks for a ``_preVisitC`` method (and :meth:`postVisit` for
    ``_postVisitC``), walking the node class's MRO so a handler registered
    for a base class also receives instances of its subclasses.  When no
    class-specific handler exists, the ``*Generic`` fallbacks are used.
    '''

    def __init__(self, *args, **kwargs):
        pass

    def traverse(self, node):
        # Nodes drive the traversal themselves via the accept() protocol.
        node.accept(self)

    def _findVisitMethod(self, node, prefix, fallback):
        # Shared MRO-walking dispatch used by both preVisit and postVisit
        # (the two previously duplicated this whole loop).
        for cls in node.__class__.__mro__:
            method = getattr(self, prefix + cls.__name__, None)
            if method:
                return method
        return fallback

    def preVisit(self, node):
        return self._findVisitMethod(node, '_preVisit', self._preVisitGeneric)(node)

    def postVisit(self, node):
        return self._findVisitMethod(node, '_postVisit', self._postVisitGeneric)(node)

    def _preVisitGeneric(self, node):
        pass

    def _postVisitGeneric(self, node):
        pass
##########################################################
# THIS IS A GENERATED FILE -- DO NOT MODIFY.
# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
# AND REGENERATE THE MATRIX SUITES.
#
# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/core_repeat_queries.yml
# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
##########################################################
executor:
archive:
hooks:
- ValidateCollections
config:
shell_options:
crashOnInvalidBSONError: ""
eval:
await import("jstests/libs/override_methods/detect_spawning_own_mongod.js");;
await import("jstests/libs/override_methods/rerun_queries.js");
objcheck: ""
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
enableTestCommands: 1
hooks:
- class: ValidateCollections
shell_options:
global_vars:
TestData:
skipValidationOnNamespaceNotFound: false
- class: CleanEveryN
n: 20
matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/**/*.js
- jstests/core/query/queryable_encryption/**/*.js
- jstests/core/query/query_settings/**/*.js
exclude_with_any_tags:
- does_not_support_repeated_reads
- requires_profiling
roots:
- jstests/core/**/*.js
- jstests/core_standalone/**/*.js
test_kind: js_test | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml |
from trakt.core.errors import ERRORS
from trakt.core.exceptions import ServerError, ClientError
from trakt.helpers import setdefault
from functools import wraps
import logging
log = logging.getLogger(__name__)
def authenticated(func):
    """Decorator forcing the ``authenticated`` keyword argument to True."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        kwargs['authenticated'] = True
        return func(*args, **kwargs)
    return wrapper
def application(func):
    """Decorator that fills in ``app_version``/``app_date`` defaults.

    When the first positional argument is an :class:`Interface`, missing
    keyword arguments are defaulted from its client configuration.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if args and isinstance(args[0], Interface):
            interface = args[0]
            defaults = {
                'app_version': interface.client.configuration['app.version'],
                'app_date': interface.client.configuration['app.date']
            }
            setdefault(kwargs, defaults, lambda key, value: value)
        return func(*args, **kwargs)
    return wrapper
class Interface(object):
    # URL path fragment served by this interface; set by subclasses.
    path = None

    def __init__(self, client):
        self.client = client

    def __getitem__(self, name):
        # Allow action lookup via subscription, e.g. interface['action'].
        if hasattr(self, name):
            return getattr(self, name)

        raise ValueError('Unknown action "%s" on %s' % (name, self))

    @property
    def http(self):
        # HTTP helper pre-configured with this interface's base path.
        if not self.client:
            return None

        return self.client.http.configure(self.path)

    @staticmethod
    def get_data(response, exceptions=False, parse=True):
        """Validate *response* and return its parsed body.

        Returns ``None`` for a missing or failed response.  When
        *exceptions* is True, 5xx responses raise :class:`ServerError` and
        other failures raise :class:`ClientError` instead.  When *parse*
        is False the raw response object is returned untouched.
        """
        if response is None:
            return None

        # Return response, if parse=False
        if not parse:
            return response

        # Check status code, log any errors
        error = False

        if response.status_code < 200 or response.status_code >= 300:
            # Lookup status code in trakt error definitions
            name, desc = ERRORS.get(response.status_code, ("Unknown", "Unknown"))

            # Log before (possibly) raising so failures are always recorded.
            log.warning('request failed: %s - "%s" (code: %s)', name, desc, response.status_code)

            if exceptions:
                # Raise an exception (including the response for further processing)
                if response.status_code >= 500:
                    raise ServerError(response)
                else:
                    raise ClientError(response)

            # Set error flag
            error = True

        # Return `None` if we encountered an error, return response data
        if error:
            return None

        # Parse response, return data
        content_type = response.headers.get('content-type')

        if content_type and content_type.startswith('application/json'):
            # Try parse json response
            try:
                data = response.json()
            except Exception as e:
                log.warning('unable to parse JSON response: %s', e)
                return None
        else:
            log.debug('response returned content-type: %r, falling back to raw data', content_type)

            # Fallback to raw content
            data = response.content

        return data
class InterfaceProxy(object):
    """Proxy that prepends a fixed argument list to every interface call.

    Non-callable attributes are passed straight through to the wrapped
    interface.
    """

    def __init__(self, interface, args):
        self.interface = interface
        self.args = list(args)

    def __getattr__(self, name):
        attribute = getattr(self.interface, name)

        if not hasattr(attribute, '__call__'):
            # Plain attribute - no wrapping needed.
            return attribute

        @wraps(attribute)
        def bound(*call_args, **kwargs):
            # Prepend the stored arguments to each call.
            combined = self.args + list(call_args)
            return attribute(*combined, **kwargs)

        return bound
# -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from rest_framework import serializers
from callcenter.models import Queue
class QueueSerializer(serializers.HyperlinkedModelSerializer):
    """
    **Create**:

        CURL Usage::

            curl -u username:password --dump-header - -H "Content-Type:application/json" -X POST --data '{"name": "queue name"}' http://localhost:8000/rest-api/queue/

        Response::

            HTTP/1.0 201 CREATED
            Date: Fri, 14 Jun 2013 09:52:27 GMT
            Server: WSGIServer/0.1 Python/2.7.3
            Vary: Accept, Accept-Language, Cookie
            Content-Type: application/json; charset=utf-8
            Content-Language: en-us
            Location: http://localhost:8000/rest-api/queue/1/
            Allow: GET, POST, HEAD, OPTIONS

    **Read**:

        CURL Usage::

            curl -u username:password -H 'Accept: application/json' http://localhost:8000/rest-api/queue/

        Response::

            {
                "count": 1,
                "next": null,
                "previous": null,
                "results": [
                    {
                        "manager": "manager",
                        "url": "http://127.0.0.1:8000/rest-api/queue/1/",
                        "name": "Sample queue",
                        "strategy": 5,
                        "moh_sound": "",
                        "record_template": "",
                        "time_base_score": "queue",
                        "tier_rules_apply": false,
                        "tier_rule_wait_second": 300,
                        "tier_rule_wait_multiply_level": true,
                        "tier_rule_no_agent_no_wait": false,
                        "discard_abandoned_after": 14400,
                        "abandoned_resume_allowed": true,
                        "max_wait_time": 0,
                        "max_wait_time_with_no_agent": 120,
                        "max_wait_time_with_no_agent_time_reached": 5,
                        "created_date": "2013-10-23T12:34:20.157Z",
                        "updated_date": "2013-10-23T12:34:20.157Z"
                    }
                ]
            }

    **Update**:

        CURL Usage::

            curl -u username:password --dump-header - -H "Content-Type: application/json" -X PUT --data '{"name": "change name"}' http://localhost:8000/rest-api/queue/%dqueue-id%/

        Response::

            HTTP/1.0 202 NO CONTENT
            Date: Fri, 23 Sep 2011 06:46:12 GMT
            Server: WSGIServer/0.1 Python/2.7.1+
            Vary: Accept-Language, Cookie
            Content-Length: 0
            Content-Type: text/html; charset=utf-8
            Content-Language: en-us
    """
    # Read-only representation of the queue manager, serialized from the
    # model's `manager` attribute.
    manager = serializers.Field(source='manager')

    class Meta:
        model = Queue
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import getopt
from shutil import move
from os import remove, close
"""Modify meta information in raw experimental data.
While comparing algorithms with the bbob_proc package, it is sometimes
needed to change the algorithm name (given as algId in the :file`.info`
files) or the algorithm comments after a run is already finished (for
example because two output folders contain results for two different
algorithms but with the same name). This script allows to change these
within a specified output folder.
written: db 28/01/2010
db 26/06/2013 corrected documentation
"""
__all__ = ['main']
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def usage():
    # Print the command-line help, taken from main()'s docstring
    # (Python 2 print statement).
    print main.__doc__
def main(argv=None):
    """Main routine.

    This script allows to change algorithm name (algId) and algorithm
    comment after a run finished, i.e., after an output folder has been
    created.

    :param seq argv: list of strings containing options and arguments.
                     If not provided, sys.argv is accessed.

    :py:data:`argv` should list an output folder (first argument) and
    additionally an algorithm name (2nd argument) and the algorithm
    comment (3rd argument).
    If only the output folder is given, the script asks for an algorithm
    name and a comment interactively.

        -h, --help
            display this message
        -v, --verbose
            verbose mode, prints out operations. When not in verbose mode, no
            output is to be expected, except for errors.

    Examples:

    * Changing algorithm name and comments for given output folder from the
      command line::

        $ python bbob_pproc/changeAlgIdAndComment.py outfolder "CMA-ES" "CMA_with_lambda_100"

    * Changing algorithm name and comments for given output folder
      interactively::

        $ python bbob_pproc/changeAlgIdAndComment.py outputfolder

    """

    if argv is None:
        argv = sys.argv[1:]
    try:
        try:
            opts, args = getopt.getopt(argv, "hv",
                                       ["help", "verbose"])
        except getopt.error, msg:
            raise Usage(msg)
        if not (args):
            usage()
            sys.exit()

        verbose = False
        # Process options
        # NOTE(review): `verbose` is parsed but never consulted below -
        # confirm whether verbose output was ever implemented.
        for o, a in opts:
            if o in ("-v","--verbose"):
                verbose = True
            elif o in ("-h", "--help"):
                usage()
                sys.exit()
            else:
                assert False, "unhandled option"

        # check if all arguments are there and ask for them if not:
        if len(args) < 3:
            if len(args) < 2:
                name = raw_input("You forgot to specify an algorithm name. Please enter one (algId):")
                args.append(name)
            comment = raw_input("You forgot to specify a comment. Please enter one for algorithm " + args[1] + ":")
            args.append(comment)

        folder = args[0]
        # make sure that folder name ends with a '/' to be able to append
        # the file names afterwards
        if not folder.endswith('/'):
            folder = folder + '/'
        algId = args[1]
        comment = args[2]

        if not os.path.exists(folder):
            print "ERROR: folder " + folder + " does not exist!"
            sys.exit()
        if not os.path.isdir(folder):
            print "ERROR: " + folder + " is not a directory"
            sys.exit()

        # get all .info files in folder:
        FILES = []
        for (path, dirs, files) in os.walk(folder):
            for fname in files:
                if fname.endswith('.info'):
                    FILES.append(os.path.join(path, fname))

        for file in FILES:
            # open file to read and temp file to write
            # NOTE(review): 'temp.temp' is created in the current working
            # directory, not next to `file` - concurrent runs would clash.
            infile = open(file,'r')
            tempfile = open('temp.temp','w')
            while infile:
                line = infile.readline()
                if not line:
                    break
                # make sure that everything is copied:
                newline = line
                # check if something needs to be changed:
                if line.find('algId') >= 0:
                    s = line.split()
                    n = 0  # compute position of 'algId'
                    for word in s:
                        n = n+1
                        if word=='algId':
                            break
                    # replace algId: keep everything up to and including the
                    # '=' token that follows 'algId', then append the new name.
                    s = s[0:n+1]
                    s.append("'" + algId + "'\n")
                    newline = " ".join(s)
                else:
                    # comment lines in .info files start with '%'
                    s = line.split()
                    if '%'==s[0]:
                        newline = "% " + comment + "\n"
                tempfile.write(newline)
            infile.close()
            tempfile.close()
            # remove old file and rename temp file accordingly
            remove(file)
            move('temp.temp', file)
            print(file + " changed")
        sys.exit()

    except Usage, err:
        print >>sys.stderr, err.msg
        print >>sys.stderr, "for help use -h or --help"
        return 2
# Script entry point; exit status is main()'s return value (2 on usage error).
if __name__ == "__main__":
    sys.exit(main())
'''
Description
Check for each generation n, compare the list of individual IDs in the sim file,
to the list of individual IDs in the nth pop in the genepop file
'''
from __future__ import print_function
__filename__ =""
__date__ = "20160825"
__author__ = "Ted Cosart<ted.cosart@umontana.edu>"
import sys
import supp_utils as supu
try:
import pgutilities as pgut
except ImportError as oie:
supu.add_main_pg_dir_to_path()
import pgutilities as pgut
#end try...except
# Column indexes into the whitespace-delimited sim file.
SIMCOLINDIV=2
SIMCOLGEN=0

def getsiminfo( s_simfile ):
	'''
	Parse a sim file and group individual IDs by generation.

	Returns a 2-tuple ( i_entrycount, dli_indivbygen ): the total number
	of lines read, and a dict mapping generation number (as a string) to
	the list of individual IDs recorded for that generation.
	'''
	dli_indivbygen={}
	i_entrycount=0
	# "with" guarantees the handle is closed even if parsing fails
	# (the original left the file open on an exception).
	with open( s_simfile, 'r' ) as osimfile:
		for s_line in osimfile:
			i_entrycount+=1
			ls_vals=s_line.strip().split( " " )
			gennum=ls_vals[ SIMCOLGEN ]
			indivnum=ls_vals[ SIMCOLINDIV ]
			# setdefault replaces the separate new-vs-existing key branches.
			dli_indivbygen.setdefault( gennum, [] ).append( indivnum )
	return ( i_entrycount, dli_indivbygen )
#end getsiminfo
def compare_genepop_file( s_genfile, s_popfile ):
	'''
	For each generation (pop section) in the genepop file, check that every
	listed individual belongs to that generation according to the sim file,
	then print a summary of correct and incorrect associations.
	'''
	i_entrycount, dli_indivbygen=getsiminfo( s_genfile )

	currgen=-1
	countindivcorrect=0
	countindivwrong=0

	with open( s_popfile, 'r' ) as opopfile:
		for s_line in opopfile:
			ls_vals=s_line.strip().split( "," )
			if ls_vals[0]=="pop":
				# each "pop" line opens the next generation's section
				currgen=currgen + 1
			elif currgen >= 0:
				# inside a pop section: first comma-field is the individual ID
				indiv=ls_vals[0]
				if indiv in dli_indivbygen[ str( currgen ) ]:
					countindivcorrect+=1
				else:
					# BUG FIX: this counter was never incremented before, so
					# mismatches were always (wrongly) reported as zero.
					countindivwrong+=1
			# lines before the first "pop" (title and loci names) are skipped

	print ( "total sim file entries: " + str( i_entrycount ) )
	print ( "correct indiv-gen associations: " + str( countindivcorrect ) )
	print ( "wrong indiv-gen associations: " + str( countindivwrong ) )
#end compare_genepop_file
if __name__=="__main__":
	# Validate the command line (do_usage_check returns a usage string on error).
	lsargs=[ "sim file", "genepop file" ]
	s_usage=pgut.do_usage_check ( sys.argv, lsargs )
	if s_usage:
		print( s_usage )
		sys.exit()
	#end if usage
	s_simfile=sys.argv[1]
	s_popfile=sys.argv[2]
	compare_genepop_file( s_simfile, s_popfile )
#end if name is "main"
import warnings
from rope.base import exceptions, pyobjects, pynames, taskhandle, evaluate, worder, codeanalyze
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import occurrences, sourceutils
class Rename(object):
    """A class for performing rename refactoring

    It can rename everything: classes, functions, modules, packages,
    methods, variables and keyword arguments.

    """

    def __init__(self, project, resource, offset=None):
        """If `offset` is None, the `resource` itself will be renamed"""
        self.project = project
        self.pycore = project.pycore
        self.resource = resource
        if offset is not None:
            # Renaming the identifier at `offset` within `resource`.
            self.old_name = worder.get_name_at(self.resource, offset)
            this_pymodule = self.pycore.resource_to_pyobject(self.resource)
            self.old_instance, self.old_pyname = \
                evaluate.eval_location2(this_pymodule, offset)
            if self.old_pyname is None:
                raise exceptions.RefactoringError(
                    'Rename refactoring should be performed'
                    ' on resolvable python identifiers.')
        else:
            # Renaming the module/package itself; a package's __init__.py
            # stands for its containing folder.
            if not resource.is_folder() and resource.name == '__init__.py':
                resource = resource.parent
            dummy_pymodule = self.pycore.get_string_module('')
            self.old_instance = None
            self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                     resource=resource)
            if resource.is_folder():
                self.old_name = resource.name
            else:
                # strip the '.py' extension
                self.old_name = resource.name[:-3]

    def get_old_name(self):
        # Name being renamed, as resolved in __init__.
        return self.old_name

    def get_changes(self, new_name, in_file=None, in_hierarchy=False,
                    unsure=None, docs=False, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Get the changes needed for this refactoring

        Parameters:

        - `in_hierarchy`: when renaming a method this keyword forces
          to rename all matching methods in the hierarchy
        - `docs`: when `True` rename refactoring will rename
          occurrences in comments and strings where the name is
          visible.  Setting it will make renames faster, too.
        - `unsure`: decides what to do about unsure occurrences.
          If `None`, they are ignored.  Otherwise `unsure` is
          called with an instance of `occurrence.Occurrence` as
          parameter.  If it returns `True`, the occurrence is
          considered to be a match.
        - `resources` can be a list of `rope.base.resources.File`\s to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.
        - `in_file`: this argument has been deprecated; use
          `resources` instead.

        """
        if unsure in (True, False):
            # Backward compatibility: `unsure` used to be a plain boolean.
            warnings.warn(
                'unsure parameter should be a function that returns '
                'True or False', DeprecationWarning, stacklevel=2)
            def unsure_func(value=unsure):
                return value
            unsure = unsure_func
        if in_file is not None:
            warnings.warn(
                '`in_file` argument has been deprecated; use `resources` '
                'instead. ', DeprecationWarning, stacklevel=2)
            if in_file:
                resources = [self.resource]
        if _is_local(self.old_pyname):
            # A function-local name can only occur in its own file.
            resources = [self.resource]
        if resources is None:
            resources = self.pycore.get_python_files()
        changes = ChangeSet('Renaming <%s> to <%s>' %
                            (self.old_name, new_name))
        finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname, unsure=unsure,
            docs=docs, instance=self.old_instance,
            in_hierarchy=in_hierarchy and self.is_method())
        job_set = task_handle.create_jobset('Collecting Changes', len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            new_content = rename_in_module(finder, new_name, resource=file_)
            if new_content is not None:
                changes.add_change(ChangeContents(file_, new_content))
            job_set.finished_job()
        if self._is_renaming_a_module():
            resource = self.old_pyname.get_object().get_resource()
            if self._is_allowed_to_move(resources, resource):
                self._rename_module(resource, new_name, changes)
        return changes

    def _is_allowed_to_move(self, resources, resource):
        # Only move the module file/folder when it was part of the set of
        # resources this refactoring is applied to.
        if resource.is_folder():
            try:
                return resource.get_child('__init__.py') in resources
            except exceptions.ResourceNotFoundError:
                return False
        else:
            return resource in resources

    def _is_renaming_a_module(self):
        if isinstance(self.old_pyname.get_object(), pyobjects.AbstractModule):
            return True
        return False

    def is_method(self):
        # True when the renamed name is a function defined inside a class.
        pyname = self.old_pyname
        return isinstance(pyname, pynames.DefinedName) and \
               isinstance(pyname.get_object(), pyobjects.PyFunction) and \
               isinstance(pyname.get_object().parent, pyobjects.PyClass)

    def _rename_module(self, resource, new_name, changes):
        # Renaming a module also moves its file (or package folder).
        if not resource.is_folder():
            new_name = new_name + '.py'
        parent_path = resource.parent.path
        if parent_path == '':
            new_location = new_name
        else:
            new_location = parent_path + '/' + new_name
        changes.add_change(MoveResource(resource, new_location))
class ChangeOccurrences(object):
    """A class for changing the occurrences of a name in a scope

    This class replaces the occurrences of a name.  Note that it only
    changes the scope containing the offset passed to the constructor.
    What's more it does not have any side-effects.  That is for
    example changing occurrences of a module does not rename the
    module; it merely replaces the occurrences of that module in a
    scope with the given expression.  This class is useful for
    performing many custom refactorings.

    """

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset
        self.old_name = worder.get_name_at(resource, offset)
        self.pymodule = self.pycore.resource_to_pyobject(self.resource)
        self.old_pyname = evaluate.eval_location(self.pymodule, offset)

    def get_old_name(self):
        # Full primary expression at the offset (e.g. `a.b.c`), not just
        # the single word.
        word_finder = worder.Worder(self.resource.read())
        return word_finder.get_primary_at(self.offset)

    def _get_scope_offset(self):
        # Character range (start, end) of the innermost scope containing
        # the constructor's offset.
        lines = self.pymodule.lines
        scope = self.pymodule.get_scope().\
                get_inner_scope_for_line(lines.get_line_number(self.offset))
        start = lines.get_line_start(scope.get_start())
        end = lines.get_line_end(scope.get_end())
        return start, end

    def get_changes(self, new_name, only_calls=False, reads=True, writes=True):
        changes = ChangeSet('Changing <%s> occurrences to <%s>' %
                            (self.old_name, new_name))
        scope_start, scope_end = self._get_scope_offset()
        finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname,
            imports=False, only_calls=only_calls)
        # Restrict the replacement to the enclosing scope's region.
        new_contents = rename_in_module(
            finder, new_name, pymodule=self.pymodule, replace_primary=True,
            region=(scope_start, scope_end), reads=reads, writes=writes)
        if new_contents is not None:
            changes.add_change(ChangeContents(self.resource, new_contents))
        return changes
def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None,
                     replace_primary=False, region=None, reads=True, writes=True):
    """Return the renamed source, or `None` when nothing changed."""
    if resource is not None:
        source = resource.read()
    else:
        source = pymodule.source_code
    collector = codeanalyze.ChangeCollector(source)
    for occurrence in occurrences_finder.find_occurrences(resource, pymodule):
        if replace_primary and occurrence.is_a_fixed_primary():
            continue
        # Replace either the whole primary expression or just the word.
        if replace_primary:
            start, end = occurrence.get_primary_range()
        else:
            start, end = occurrence.get_word_range()
        # Skip occurrences whose access kind (read/write) was excluded.
        if (not reads and not occurrence.is_written()) or \
           (not writes and occurrence.is_written()):
            continue
        # When a region is given, only touch occurrences starting inside it.
        if region is not None and not (region[0] <= start < region[1]):
            continue
        collector.add_change(start, end, new_name)
    return collector.get_changed()
def _is_local(pyname):
module, lineno = pyname.get_definition_location()
if lineno is None:
return False
scope = module.get_scope().get_inner_scope_for_line(lineno)
if isinstance(pyname, pynames.DefinedName) and \
scope.get_kind() in ('Function', 'Class'):
scope = scope.parent
return scope.get_kind() == 'Function' and \
pyname in scope.get_names().values() and \
isinstance(pyname, pynames.AssignedName) | unknown | codeparrot/codeparrot-clean | ||
""" A model of an Infrastructure Cluster in CFME
:var page: A :py:class:`cfme.web_ui.Region` object describing common elements on the
Cluster pages.
"""
from functools import partial
from navmazing import NavigateToSibling, NavigateToAttribute
from cfme.fixtures import pytest_selenium as sel
from utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from utils.appliance import Navigatable
from cfme.web_ui import Quadicon, Region, listaccordion as list_acc, toolbar as tb, flash, \
paginator, match_location
from utils.pretty import Pretty
from utils.wait import wait_for
from utils.api import rest_api
details_page = Region(infoblock_type='detail')
cfg_btn = partial(tb.select, 'Configuration')
pol_btn = partial(tb.select, 'Policy')
match_page = partial(match_location, controller='ems_cluster',
title='Clusters')
# todo: since Cluster always requires provider, it will use only one way to get to Cluster Detail's
# page. But we need to fix this in the future.
class Cluster(Pretty, Navigatable):
""" Model of an infrastructure cluster in cfme
Args:
name: Name of the cluster.
provider: provider this cluster is attached to.
Note:
If given a provider_key, it will navigate through ``Infrastructure/Providers`` instead
of the direct path through ``Infrastructure/Clusters``.
"""
pretty_attrs = ['name', 'provider']
def __init__(self, name, provider, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self._short_name = self.name.split('in')[0].strip()
self.provider = provider
self.quad_name = 'cluster'
col = rest_api().collections
self._id = [cl.id for cl in col.clusters.all if cl.name == self._short_name
and cl.ems_id == self.provider.id][-1]
def delete(self, cancel=True):
"""
Deletes a cluster from CFME
Args:
cancel: Whether to cancel the deletion, defaults to True
"""
navigate_to(self, 'Details')
cfg_btn('Remove from the VMDB', invokes_alert=True)
sel.handle_alert(cancel=cancel)
def wait_for_delete(self):
wait_for(lambda: not self.exists, fail_condition=False,
message="Wait cluster to disappear", num_sec=500, fail_func=sel.refresh)
def wait_for_appear(self):
wait_for(lambda: self.exists, fail_condition=False,
message="Wait cluster to appear", num_sec=1000, fail_func=sel.refresh)
def get_detail(self, *ident):
""" Gets details from the details infoblock
The function first ensures that we are on the detail page for the specific cluster.
Args:
*ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images"
Returns: A string representing the contents of the InfoBlock's value.
"""
navigate_to(self, 'Details')
return details_page.infoblock.text(*ident)
@property
def exists(self):
try:
navigate_to(self, 'Details')
quad = Quadicon(self.name, self.quad_name)
if sel.is_displayed(quad):
return True
except sel.NoSuchElementException:
return False
@property
def id(self):
"""extracts cluster id for this cluster"""
return self._id
@property
def short_name(self):
"""returns only cluster's name exactly how it is stored in DB (without datacenter part)"""
return self._short_name
def run_smartstate_analysis(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
sel.handle_alert(cancel=False)
flash.assert_message_contain('Cluster / Deployment Role: scan successfully initiated')
@navigator.register(Cluster, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
from cfme.web_ui.menu import nav
nav._nav_to_fn('Compute', 'Infrastructure', 'Clusters')(None)
def resetter(self):
tb.select("Grid View")
sel.check(paginator.check_all())
sel.uncheck(paginator.check_all())
@navigator.register(Cluster, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.click(Quadicon(self.obj.name, self.obj.quad_name))
def am_i_here(self):
return match_page(summary="{} (Summary)".format(self.obj.name))
@navigator.register(Cluster, 'DetailsFromProvider')
class DetailsFromProvider(CFMENavigateStep):
def step(self):
navigate_to(self.obj.provider, 'Details')
list_acc.select('Relationships', 'Show all managed Clusters', by_title=True, partial=False)
sel.click(Quadicon(self.obj.name, self.obj.quad_name))
def am_i_here(self):
return match_page(summary="{} (Summary)".format(self.obj.name)) | unknown | codeparrot/codeparrot-clean | ||
"""SCons.Tool.pdflatex
Tool-specific initialization for pdflatex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdflatex.py 4043 2009/02/23 09:06:45 scons"
import SCons.Action
import SCons.Util
import SCons.Tool.pdf
import SCons.Tool.tex
PDFLaTeXAction = None
def PDFLaTeXAuxFunction(target = None, source= None, env=None):
result = SCons.Tool.tex.InternalLaTeXAuxAction( PDFLaTeXAction, target, source, env )
return result
PDFLaTeXAuxAction = None
def generate(env):
"""Add Builders and construction variables for pdflatex to an Environment."""
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action('$PDFLATEXCOM', '$PDFLATEXCOMSTR')
global PDFLaTeXAuxAction
if PDFLaTeXAuxAction is None:
PDFLaTeXAuxAction = SCons.Action.Action(PDFLaTeXAuxFunction,
strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.ltx', PDFLaTeXAuxAction)
bld.add_action('.latex', PDFLaTeXAuxAction)
bld.add_emitter('.ltx', SCons.Tool.tex.tex_pdf_emitter)
bld.add_emitter('.latex', SCons.Tool.tex.tex_pdf_emitter)
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 3
def exists(env):
return env.Detect('pdflatex')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_exchange
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ exchanges
description:
- This module uses rabbitMQ Rest API to create/delete exchanges
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the exchange to create
required: true
state:
description:
- Whether the exchange should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: false
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether exchange is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
exchange_type:
description:
- type for the exchange
required: false
choices: [ "fanout", "direct", "headers", "topic" ]
aliases: [ "type" ]
default: direct
auto_delete:
description:
- if the exchange should delete itself after all queues/exchanges unbound from it
required: false
choices: [ "yes", "no" ]
default: no
internal:
description:
- exchange is available only for other exchanges
required: false
choices: [ "yes", "no" ]
default: no
arguments:
description:
- extra arguments for exchange. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create direct exchange
- rabbitmq_exchange:
name: directExchange
# Create topic exchange on vhost
- rabbitmq_exchange:
name: topicExchange
type: topic
vhost: myVhost
'''
import json
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
login_user=dict(default='guest', type='str'),
login_password=dict(default='guest', type='str', no_log=True),
login_host=dict(default='localhost', type='str'),
login_port=dict(default='15672', type='str'),
vhost=dict(default='/', type='str'),
durable=dict(default=True, type='bool'),
auto_delete=dict(default=False, type='bool'),
internal=dict(default=False, type='bool'),
exchange_type=dict(default='direct', aliases=['type'], type='str'),
arguments=dict(default=dict(), type='dict')
),
supports_check_mode=True
)
result = dict(changed=False, name=module.params['name'])
url = "http://%s:%s/api/exchanges/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib_parse.quote(module.params['vhost'], ''),
urllib_parse.quote(module.params['name'], '')
)
if not HAS_REQUESTS:
module.fail_json(msg="requests library is required for this module. To install, use `pip install requests`")
# Check if exchange already exists
r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']))
if r.status_code == 200:
exchange_exists = True
response = r.json()
elif r.status_code == 404:
exchange_exists = False
response = r.text
else:
module.fail_json(
msg="Invalid response from RESTAPI when trying to check if exchange exists",
details=r.text
)
if module.params['state'] == 'present':
change_required = not exchange_exists
else:
change_required = exchange_exists
# Check if attributes change on existing exchange
if not change_required and r.status_code == 200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
response['internal'] == module.params['internal'] and
response['type'] == module.params['exchange_type']
):
module.fail_json(
msg="RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges"
)
# Exit if check_mode
if module.check_mode:
result['changed'] = change_required
result['details'] = response
result['arguments'] = module.params['arguments']
module.exit_json(**result)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth=(module.params['login_user'], module.params['login_password']),
headers={"content-type": "application/json"},
data=json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"internal": module.params['internal'],
"type": module.params['exchange_type'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']))
# RabbitMQ 3.6.7 changed this response code from 204 to 201
if r.status_code == 204 or r.status_code == 201:
result['changed'] = True
module.exit_json(**result)
else:
module.fail_json(
msg="Error creating exchange",
status=r.status_code,
details=r.text
)
else:
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_tcp import Parameters
from library.modules.bigip_monitor_tcp import ModuleManager
from library.modules.bigip_monitor_tcp import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_monitor_tcp import Parameters
from ansible.modules.network.f5.bigip_monitor_tcp import ModuleManager
from ansible.modules.network.f5.bigip_monitor_tcp import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
type='TTYPE_TCP',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_module_parameters_ints_as_strings(self):
args = dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
type='TTYPE_TCP',
port='80',
interval='20',
timeout='30',
time_until_up='60',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_api_parameters(self):
args = dict(
name='foo',
defaultsFrom='/Common/parent',
send='this is a send string',
recv='this is a receive string',
destination='10.10.10.10:80',
interval=20,
timeout=30,
timeUntilUp=60
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['parent'] == '/Common/parent'
def test_create_monitor_idempotent(self, *args):
set_module_args(dict(
name='foo',
parent='tcp',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False
def test_update_port(self, *args):
set_module_args(dict(
name='foo',
port=800,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['port'] == 800
def test_update_interval(self, *args):
set_module_args(dict(
name='foo',
interval=10,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['interval'] == 10
def test_update_interval_larger_than_existing_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=30,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_interval_larger_than_new_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=10,
timeout=5,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_send(self, *args):
set_module_args(dict(
name='foo',
send='this is another send string',
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['send'] == 'this is another send string'
def test_update_receive(self, *args):
set_module_args(dict(
name='foo',
receive='this is another receive string',
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['receive'] == 'this is another receive string'
def test_update_timeout(self, *args):
set_module_args(dict(
name='foo',
timeout=300,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['timeout'] == 300
def test_update_time_until_up(self, *args):
set_module_args(dict(
name='foo',
time_until_up=300,
partition='Common',
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['time_until_up'] == 300 | unknown | codeparrot/codeparrot-clean | ||
class Flap(object):
"""Common registration methods for applications & blueprints"""
def __init__(self, flail):
self.flail = flail
def configure_context_processors(self, app, context_processors):
"""Sets app wide context processors."""
app.context_processor(lambda: context_processors)
def configure_app_context_processors(self, app, context_processors):
"""Sets app wide context processors from a blueprint."""
app.app_context_processor(lambda: context_processors)
def configure_jinja_globals(self, app, jinja_globals):
for k, v in jinja_globals.items():
app.jinja_env.globals[k] = v
def configure_template_filters(self, app, template_filters):
"""Sets template filters on the jinja2 environment."""
for filter_name, filter_fn in template_filters:
app.jinja_env.filters[filter_name] = filter_fn
def configure_before_handlers(self, app, before_handlers):
"""Sets before handlers."""
for before in before_handlers:
before = app.before_request(before)
def configure_before_app_handlers(self, app, before_handlers):
"""Sets app wide before handlers from a blueprint."""
for before in before_handlers:
before = app.before_app_request(before)
def configure_after_handlers(self, app, after_handlers):
"""Sets after handlers."""
for after in after_handlers:
after = app.after_request(after)
def configure_after_app_handlers(self, app, after_handlers):
"""Sets app wide after handlers from a blueprint."""
for after in after_handlers:
after = app.after_app_request(after)
def configure_log_handlers(self, app, log_handlers):
"""Sets log handlers for the app."""
for handler in log_handlers:
app.logger.addHandler(handler)
def configure_error_handlers(self, app, error_handlers):
"""Sets custom error handlers."""
for code, fn in error_handlers:
fn = app.errorhandler(code)(fn)
def configure_app_error_handlers(self, app, error_handlers):
"""Sets app wide custom error handlers from a blueprint."""
for code, fn in error_handlers:
fn = app.app_errorhandler(code)(fn)
def configure_views(self, app, views):
for v in views:
try:
getattr(v, 'register')(app)
except Exception as e:
Exception(e)
def configure_middlewares(self, app, middlewares):
"""Adds middlewares to the app."""
if middlewares:
for m in middlewares:
if isinstance(m, list) or isinstance(m, tuple):
if len(m) == 3:
mware, args, kwargs = m
new_mware = mware(app.wsgi_app, *args, **kwargs)
elif len(m) == 2:
mware, args = m
if isinstance(args, dict):
new_mware = mware(app.wsgi_app, **args)
elif isinstance(args, list) or isinstance(args, tuple):
new_mware = mware(app.wsgi_app, *args)
else:
new_mware = mware(app.wsgi_app, args)
else:
new_mware = m(app.wsgi_app)
app.wsgi_app = new_mware
@property
def app_actions(self):
return {'before_requests': self.configure_before_handlers,
'after_requests': self.configure_after_handlers,
'context_processors': self.configure_context_processors,
'jinja_globals': self.configure_jinja_globals,
'template_filters': self.configure_template_filters,
'error_handlers': self.configure_error_handlers,
'log_handlers': self.configure_log_handlers,
'middleware': self.configure_middlewares}
@property
def blueprint_actions(self):
return {'before_requests': self.configure_before_handlers,
'before_app_requests': self.configure_before_app_handlers,
'after_requests': self.configure_after_handlers,
'after_app_requests': self.configure_after_app_handlers,
'context_processors': self.configure_context_processors,
'app_context_processors': self.configure_app_context_processors,
'error_handlers': self.configure_error_handlers,
'app_error_handlers': self.configure_app_error_handlers,
'view_handlers': self.configure_views} | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* allpaths.c
* Routines to find possible search paths for processing a query
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/optimizer/path/allpaths.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/geqo.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/tlist.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "partitioning/partbounds.h"
#include "port/pg_bitutils.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
/*
 * Bitmask flags for pushdown_safety_info.unsafeFlags: per-output-column
 * reasons why pushing an outer qual down into the subquery would be unsafe.
 * A column's entry is 0 when there is no objection to pushdown.
 */
#define UNSAFE_HAS_VOLATILE_FUNC (1 << 0)	/* expression contains a volatile
											 * function */
#define UNSAFE_HAS_SET_FUNC (1 << 1)	/* expression contains a set-returning
										 * function */
#define UNSAFE_NOTIN_DISTINCTON_CLAUSE (1 << 2) /* column is not listed in the
												 * subquery's DISTINCT ON */
#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)	/* column is not in every
													 * window PARTITION BY list */
#define UNSAFE_TYPE_MISMATCH (1 << 4)	/* column type differs from the
										 * set-operation's output column type */
/*
 * Results of subquery_is_pushdown_safe(): safety information later consulted
 * by qual_is_pushdown_safe() when deciding whether an individual outer qual
 * may be pushed down into the subquery.
 */
typedef struct pushdown_safety_info
{
	unsigned char *unsafeFlags; /* bitmask of reasons why this target list
								 * column is unsafe for qual pushdown, or 0 if
								 * no reason.  (Bits are the UNSAFE_* flags
								 * defined above; one entry per output
								 * column.) */
	bool unsafeVolatile;		/* don't push down volatile quals */
	bool unsafeLeaky;			/* don't push down leaky quals */
} pushdown_safety_info;
/*
 * Return type for qual_is_pushdown_safe: whether (and in what way) a given
 * qual can be pushed down into a subquery.
 */
typedef enum pushdown_safe_type
{
	PUSHDOWN_UNSAFE,			/* unsafe to push qual into subquery */
	PUSHDOWN_SAFE,				/* safe to push qual into subquery */
	PUSHDOWN_WINDOWCLAUSE_RUNCOND,	/* unsafe to push down as a qual, but may
									 * work as a WindowClause run condition */
} pushdown_safe_type;
/*
 * These parameters are set by GUC; the initializers are fallbacks in case
 * GUC startup does not assign them.
 */
bool enable_geqo = false;		/* just in case GUC doesn't set it */
bool enable_eager_aggregate = true;
int geqo_threshold;				/* join-search size at which the genetic
								 * query optimizer (GEQO) is used; see
								 * optimizer/geqo.h */
double min_eager_agg_group_size;
int min_parallel_table_scan_size;	/* minimum table scan size to consider
									 * parallelism (units per GUC definition) */
int min_parallel_index_scan_size;	/* likewise for index scans */

/* Hook for plugins to get control in set_rel_pathlist() */
set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;

/* Hook for plugins to replace standard_join_search() */
join_search_hook_type join_search_hook = NULL;
/* ---- Forward declarations for local routines ---- */

/* Per-relation sizing and path generation for plain base relations */
static void set_base_rel_consider_startup(PlannerInfo *root);
static void set_base_rel_sizes(PlannerInfo *root);
static void setup_simple_grouped_rels(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
						 Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
							 Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
							   RangeTblEntry *rte);
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
									  RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
								   RangeTblEntry *rte);
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
									 RangeTblEntry *rte);
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
										 RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
							 RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
								 RangeTblEntry *rte);

/* Append relations (inheritance/partitioning) */
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
								Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
									Index rti, RangeTblEntry *rte);
static void set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel);
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
										 List *live_childrels,
										 List *all_child_pathkeys);
static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
												   RelOptInfo *rel,
												   Relids required_outer);
static void accumulate_append_subpath(Path *path,
									  List **subpaths,
									  List **special_subpaths,
									  List **child_append_relid_sets);
static Path *get_singleton_append_subpath(Path *path,
										  List **child_append_relid_sets);
static void set_dummy_rel_pathlist(RelOptInfo *rel);

/* Other RTE kinds: subqueries, functions, VALUES, CTEs, etc. */
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
								  Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
								  RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
								RangeTblEntry *rte);
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
								   RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
							 RangeTblEntry *rte);
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
										 RangeTblEntry *rte);
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
								RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
								   RangeTblEntry *rte);

/* Join search */
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);

/* Subquery qual-pushdown safety analysis and application */
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
									  pushdown_safety_info *safetyInfo);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
								  pushdown_safety_info *safetyInfo);
static void check_output_expressions(Query *subquery,
									 pushdown_safety_info *safetyInfo);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
									pushdown_safety_info *safetyInfo);
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
												RestrictInfo *rinfo,
												pushdown_safety_info *safetyInfo);
static void subquery_push_qual(Query *subquery,
							   RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
							  RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
										   Bitmapset *extra_used_attrs);
/*
 * make_one_rel
 *	  Finds all possible access paths for executing a query, returning a
 *	  single rel that represents the join of all base rels in the query.
 *
 * 'joinlist' is the deconstructed join tree to be searched; it is passed
 * through unchanged to make_rel_from_joinlist().
 *
 * The phases below run in a fixed order: startup flags, then per-rel sizes,
 * then grouped-rel setup, then total_table_pages, then base-rel paths, and
 * finally the join search.  Later phases consume data computed by earlier
 * ones, so do not reorder.
 */
RelOptInfo *
make_one_rel(PlannerInfo *root, List *joinlist)
{
	RelOptInfo *rel;
	Index		rti;
	double		total_pages;

	/* Mark base rels as to whether we care about fast-start plans */
	set_base_rel_consider_startup(root);

	/*
	 * Compute size estimates and consider_parallel flags for each base rel.
	 */
	set_base_rel_sizes(root);

	/*
	 * Build grouped relations for simple rels (i.e., base or "other" member
	 * relations) where possible.
	 */
	setup_simple_grouped_rels(root);

	/*
	 * We should now have size estimates for every actual table involved in
	 * the query, and we also know which if any have been deleted from the
	 * query by join removal, pruned by partition pruning, or eliminated by
	 * constraint exclusion.  So we can now compute total_table_pages.
	 *
	 * Note that appendrels are not double-counted here, even though we don't
	 * bother to distinguish RelOptInfos for appendrel parents, because the
	 * parents will have pages = 0.
	 *
	 * XXX if a table is self-joined, we will count it once per appearance,
	 * which perhaps is the wrong thing ... but that's not completely clear,
	 * and detecting self-joins here is difficult, so ignore it for now.
	 */
	total_pages = 0;
	for (rti = 1; rti < root->simple_rel_array_size; rti++)
	{
		RelOptInfo *brel = root->simple_rel_array[rti];

		/* there may be empty slots corresponding to non-baserel RTEs */
		if (brel == NULL)
			continue;

		Assert(brel->relid == rti); /* sanity check on array */

		/* dummy rels are never scanned, so they contribute no pages */
		if (IS_DUMMY_REL(brel))
			continue;

		if (IS_SIMPLE_REL(brel))
			total_pages += (double) brel->pages;
	}
	root->total_table_pages = total_pages;

	/*
	 * Generate access paths for each base rel.
	 */
	set_base_rel_pathlists(root);

	/*
	 * Generate access paths for the entire join tree.
	 */
	rel = make_rel_from_joinlist(root, joinlist);

	/*
	 * The result should join all and only the query's base + outer-join rels.
	 */
	Assert(bms_equal(rel->relids, root->all_query_rels));

	return rel;
}
/*
 * set_base_rel_consider_startup
 *	  Set the consider_[param_]startup flags for each base-relation entry.
 *
 * Only consider_param_startup is handled here.  The consider_startup flag
 * is the same trivial computation for every base rel, so we simply let
 * build_simple_rel() initialize it; if that logic ever grows more complex,
 * it would belong here instead.
 */
static void
set_base_rel_consider_startup(PlannerInfo *root)
{
	ListCell   *cell;

	/*
	 * Parameterized paths can appear only on the inside of a nestloop join,
	 * where fast-start plans are rarely worth anything.  The exception is a
	 * relation forming the RHS of a SEMI or ANTI join: the executor needs
	 * only one matching tuple from it, so a cheap-startup path can win.
	 *
	 * To keep planning time in check, we apply this only when the RHS is a
	 * single base relation, not a join; joinrels have no provision for
	 * consider_param_startup at all, and appendrels are likewise ignored.
	 * costsize.c's nestloop semi/antijoin costing doesn't consider such
	 * cases either.
	 */
	foreach(cell, root->join_info_list)
	{
		SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(cell);
		int			relid;

		/* only SEMI/ANTI joins qualify */
		if (sjinfo->jointype != JOIN_SEMI && sjinfo->jointype != JOIN_ANTI)
			continue;

		/* and only when the syntactic RHS is exactly one base rel */
		if (!bms_get_singleton_member(sjinfo->syn_righthand, &relid))
			continue;

		find_base_rel(root, relid)->consider_param_startup = true;
	}
}
/*
 * set_base_rel_sizes
 *	  Set the size estimates (rows and widths) for each base-relation entry.
 *	  Also determine whether to consider parallel paths for base relations.
 *
 * We do this in a separate pass over the base rels so that rowcount
 * estimates are available for parameterized path generation, and also so
 * that each rel's consider_parallel flag is set correctly before we begin to
 * generate paths.
 */
static void
set_base_rel_sizes(PlannerInfo *root)
{
	Index		rti;

	/* slot 0 of simple_rel_array is always unused, hence start at 1 */
	for (rti = 1; rti < root->simple_rel_array_size; rti++)
	{
		RelOptInfo *rel = root->simple_rel_array[rti];
		RangeTblEntry *rte;

		/* there may be empty slots corresponding to non-baserel RTEs */
		if (rel == NULL)
			continue;

		Assert(rel->relid == rti);	/* sanity check on array */

		/* ignore RTEs that are "other rels"; they're handled via parents */
		if (rel->reloptkind != RELOPT_BASEREL)
			continue;

		rte = root->simple_rte_array[rti];

		/*
		 * If parallelism is allowable for this query in general, see whether
		 * it's allowable for this rel in particular.  We have to do this
		 * before set_rel_size(), because (a) if this rel is an inheritance
		 * parent, set_append_rel_size() will use and perhaps change the rel's
		 * consider_parallel flag, and (b) for some RTE types, set_rel_size()
		 * goes ahead and makes paths immediately.
		 */
		if (root->glob->parallelModeOK)
			set_rel_consider_parallel(root, rel, rte);

		set_rel_size(root, rel, rti, rte);
	}
}
/*
 * setup_simple_grouped_rels
 *	  For each simple relation, build a grouped simple relation if eager
 *	  aggregation is possible and if this relation can produce grouped paths.
 */
static void
setup_simple_grouped_rels(PlannerInfo *root)
{
	Index		relid;

	/*
	 * Eager aggregation requires both aggregate expressions and grouping
	 * expressions; if either list is empty there is nothing to do.
	 */
	if (root->agg_clause_list == NIL || root->group_expr_list == NIL)
		return;

	for (relid = 1; relid < root->simple_rel_array_size; relid++)
	{
		RelOptInfo *rel = root->simple_rel_array[relid];

		/* skip the empty slots left for non-baserel RTEs */
		if (rel == NULL)
			continue;

		Assert(rel->relid == relid);	/* sanity check on array */
		Assert(IS_SIMPLE_REL(rel)); /* sanity check on rel */

		(void) build_simple_grouped_rel(root, rel);
	}
}
/*
 * set_base_rel_pathlists
 *	  Finds all paths available for scanning each base-relation entry.
 *	  Sequential scan and any available indices are considered.
 *	  Each useful path is attached to its relation's 'pathlist' field.
 */
static void
set_base_rel_pathlists(PlannerInfo *root)
{
	Index		relid;

	for (relid = 1; relid < root->simple_rel_array_size; relid++)
	{
		RelOptInfo *rel = root->simple_rel_array[relid];

		/* skip empty slots corresponding to non-baserel RTEs */
		if (rel == NULL)
			continue;

		Assert(rel->relid == relid);	/* sanity check on array */

		/* "other rels" get their paths built via their append parent */
		if (rel->reloptkind != RELOPT_BASEREL)
			continue;

		set_rel_pathlist(root, rel, relid, root->simple_rte_array[relid]);
	}
}
/*
 * set_rel_size
 *	  Set size estimates for a base relation
 *
 * 'rel' is the relation to size; 'rti' is its range-table index and 'rte'
 * the corresponding RangeTblEntry.  For some RTE kinds (subqueries, CTEs,
 * named tuplestores, RESULT rels) this goes ahead and builds paths too,
 * since those don't distinguish parameterized from unparameterized scans.
 */
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
			 Index rti, RangeTblEntry *rte)
{
	if (rel->reloptkind == RELOPT_BASEREL &&
		relation_excluded_by_constraints(root, rel, rte))
	{
		/*
		 * We proved we don't need to scan the rel via constraint exclusion,
		 * so set up a single dummy path for it.  Here we only check this for
		 * regular baserels; if it's an otherrel, CE was already checked in
		 * set_append_rel_size().
		 *
		 * In this case, we go ahead and set up the relation's path right away
		 * instead of leaving it for set_rel_pathlist to do.  This is because
		 * we don't have a convention for marking a rel as dummy except by
		 * assigning a dummy path to it.
		 */
		set_dummy_rel_pathlist(rel);
	}
	else if (rte->inh)
	{
		/* It's an "append relation", process accordingly */
		set_append_rel_size(root, rel, rti, rte);
	}
	else
	{
		switch (rel->rtekind)
		{
			case RTE_RELATION:
				if (rte->relkind == RELKIND_FOREIGN_TABLE)
				{
					/* Foreign table */
					set_foreign_size(root, rel, rte);
				}
				else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
				{
					/*
					 * We could get here if asked to scan a partitioned table
					 * with ONLY.  In that case we shouldn't scan any of the
					 * partitions, so mark it as a dummy rel.
					 */
					set_dummy_rel_pathlist(rel);
				}
				else if (rte->tablesample != NULL)
				{
					/* Sampled relation */
					set_tablesample_rel_size(root, rel, rte);
				}
				else
				{
					/* Plain relation */
					set_plain_rel_size(root, rel, rte);
				}
				break;
			case RTE_SUBQUERY:

				/*
				 * Subqueries don't support making a choice between
				 * parameterized and unparameterized paths, so just go ahead
				 * and build their paths immediately.
				 */
				set_subquery_pathlist(root, rel, rti, rte);
				break;
			case RTE_FUNCTION:
				set_function_size_estimates(root, rel);
				break;
			case RTE_TABLEFUNC:
				set_tablefunc_size_estimates(root, rel);
				break;
			case RTE_VALUES:
				set_values_size_estimates(root, rel);
				break;
			case RTE_CTE:

				/*
				 * CTEs don't support making a choice between parameterized
				 * and unparameterized paths, so just go ahead and build their
				 * paths immediately.
				 */
				if (rte->self_reference)
					set_worktable_pathlist(root, rel, rte);
				else
					set_cte_pathlist(root, rel, rte);
				break;
			case RTE_NAMEDTUPLESTORE:
				/* Might as well just build the path immediately */
				set_namedtuplestore_pathlist(root, rel, rte);
				break;
			case RTE_RESULT:
				/* Might as well just build the path immediately */
				set_result_pathlist(root, rel, rte);
				break;
			default:
				elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
				break;
		}
	}

	/*
	 * We insist that all non-dummy rels have a nonzero rowcount estimate.
	 */
	Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
}
/*
 * set_rel_pathlist
 *	  Build access paths for a base relation
 *
 * Runs after set_rel_size() has established size estimates.  After the
 * core paths are built, the set_rel_pathlist_hook may add/modify paths,
 * Gather paths are considered for partial paths, set_cheapest() finalizes
 * the rel, and lastly grouped-rel paths are built (which require the
 * cheapest paths to be known).
 */
static void
set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
				 Index rti, RangeTblEntry *rte)
{
	if (IS_DUMMY_REL(rel))
	{
		/* We already proved the relation empty, so nothing more to do */
	}
	else if (rte->inh)
	{
		/* It's an "append relation", process accordingly */
		set_append_rel_pathlist(root, rel, rti, rte);
	}
	else
	{
		switch (rel->rtekind)
		{
			case RTE_RELATION:
				if (rte->relkind == RELKIND_FOREIGN_TABLE)
				{
					/* Foreign table */
					set_foreign_pathlist(root, rel, rte);
				}
				else if (rte->tablesample != NULL)
				{
					/* Sampled relation */
					set_tablesample_rel_pathlist(root, rel, rte);
				}
				else
				{
					/* Plain relation */
					set_plain_rel_pathlist(root, rel, rte);
				}
				break;
			case RTE_SUBQUERY:
				/* Subquery --- fully handled during set_rel_size */
				break;
			case RTE_FUNCTION:
				/* RangeFunction */
				set_function_pathlist(root, rel, rte);
				break;
			case RTE_TABLEFUNC:
				/* Table Function */
				set_tablefunc_pathlist(root, rel, rte);
				break;
			case RTE_VALUES:
				/* Values list */
				set_values_pathlist(root, rel, rte);
				break;
			case RTE_CTE:
				/* CTE reference --- fully handled during set_rel_size */
				break;
			case RTE_NAMEDTUPLESTORE:
				/* tuplestore reference --- fully handled during set_rel_size */
				break;
			case RTE_RESULT:
				/* simple Result --- fully handled during set_rel_size */
				break;
			default:
				elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
				break;
		}
	}

	/*
	 * Allow a plugin to editorialize on the set of Paths for this base
	 * relation.  It could add new paths (such as CustomPaths) by calling
	 * add_path(), or add_partial_path() if parallel aware.  It could also
	 * delete or modify paths added by the core code.
	 */
	if (set_rel_pathlist_hook)
		(*set_rel_pathlist_hook) (root, rel, rti, rte);

	/*
	 * If this is a baserel, we should normally consider gathering any partial
	 * paths we may have created for it.  We have to do this after calling the
	 * set_rel_pathlist_hook, else it cannot add partial paths to be included
	 * here.
	 *
	 * However, if this is an inheritance child, skip it.  Otherwise, we could
	 * end up with a very large number of gather nodes, each trying to grab
	 * its own pool of workers.  Instead, we'll consider gathering partial
	 * paths for the parent appendrel.
	 *
	 * Also, if this is the topmost scan/join rel, we postpone gathering until
	 * the final scan/join targetlist is available (see grouping_planner).
	 */
	if (rel->reloptkind == RELOPT_BASEREL &&
		!bms_equal(rel->relids, root->all_query_rels))
		generate_useful_gather_paths(root, rel, false);

	/* Now find the cheapest of the paths for this rel */
	set_cheapest(rel);

	/*
	 * If a grouped relation for this rel exists, build partial aggregation
	 * paths for it.
	 *
	 * Note that this can only happen after we've called set_cheapest() for
	 * this base rel, because we need its cheapest paths.
	 */
	set_grouped_rel_pathlist(root, rel);

#ifdef OPTIMIZER_DEBUG
	pprint(rel);
#endif
}
/*
 * set_plain_rel_size
 *	  Set size estimates for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	/*
	 * Test any partial indexes of rel for applicability.  We must do this
	 * first since partial unique indexes can affect size estimates.
	 */
	check_index_predicates(root, rel);

	/* Mark rel with estimated output rows, width, etc */
	set_baserel_size_estimates(root, rel);
}
/*
 * set_rel_consider_parallel
 *	  If this relation could possibly be scanned from within a worker, then
 *	  set its consider_parallel flag.
 *
 * The flag starts out false (asserted below); we return early, leaving it
 * false, as soon as any check rules parallelism out.  Only if every check
 * passes do we set consider_parallel = true at the bottom.
 */
static void
set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
						  RangeTblEntry *rte)
{
	/*
	 * The flag has previously been initialized to false, so we can just
	 * return if it becomes clear that we can't safely set it.
	 */
	Assert(!rel->consider_parallel);

	/* Don't call this if parallelism is disallowed for the entire query. */
	Assert(root->glob->parallelModeOK);

	/* This should only be called for baserels and appendrel children. */
	Assert(IS_SIMPLE_REL(rel));

	/* Assorted checks based on rtekind. */
	switch (rte->rtekind)
	{
		case RTE_RELATION:

			/*
			 * Currently, parallel workers can't access the leader's temporary
			 * tables.  We could possibly relax this if we wrote all of its
			 * local buffers at the start of the query and made no changes
			 * thereafter (maybe we could allow hint bit changes), and if we
			 * taught the workers to read them.  Writing a large number of
			 * temporary buffers could be expensive, though, and we don't have
			 * the rest of the necessary infrastructure right now anyway.  So
			 * for now, bail out if we see a temporary table.
			 */
			if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
				return;

			/*
			 * Table sampling can be pushed down to workers if the sample
			 * function and its arguments are safe.
			 */
			if (rte->tablesample != NULL)
			{
				char		proparallel = func_parallel(rte->tablesample->tsmhandler);

				if (proparallel != PROPARALLEL_SAFE)
					return;
				if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
					return;
			}

			/*
			 * Ask FDWs whether they can support performing a ForeignScan
			 * within a worker.  Most often, the answer will be no.  For
			 * example, if the nature of the FDW is such that it opens a TCP
			 * connection with a remote server, each parallel worker would end
			 * up with a separate connection, and these connections might not
			 * be appropriately coordinated between workers and the leader.
			 */
			if (rte->relkind == RELKIND_FOREIGN_TABLE)
			{
				Assert(rel->fdwroutine);
				/* FDW must both provide the callback and have it say yes */
				if (!rel->fdwroutine->IsForeignScanParallelSafe)
					return;
				if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
					return;
			}

			/*
			 * There are additional considerations for appendrels, which we'll
			 * deal with in set_append_rel_size and set_append_rel_pathlist.
			 * For now, just set consider_parallel based on the rel's own
			 * quals and targetlist.
			 */
			break;

		case RTE_SUBQUERY:

			/*
			 * There's no intrinsic problem with scanning a subquery-in-FROM
			 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
			 * If the subquery doesn't happen to have any parallel-safe paths,
			 * then flagging it as consider_parallel won't change anything,
			 * but that's true for plain tables, too.  We must set
			 * consider_parallel based on the rel's own quals and targetlist,
			 * so that if a subquery path is parallel-safe but the quals and
			 * projection we're sticking onto it are not, we correctly mark
			 * the SubqueryScanPath as not parallel-safe.  (Note that
			 * set_subquery_pathlist() might push some of these quals down
			 * into the subquery itself, but that doesn't change anything.)
			 *
			 * We can't push sub-select containing LIMIT/OFFSET to workers as
			 * there is no guarantee that the row order will be fully
			 * deterministic, and applying LIMIT/OFFSET will lead to
			 * inconsistent results at the top-level.  (In some cases, where
			 * the result is ordered, we could relax this restriction.  But it
			 * doesn't currently seem worth expending extra effort to do so.)
			 */
			{
				Query	   *subquery = castNode(Query, rte->subquery);

				if (limit_needed(subquery))
					return;
			}
			break;

		case RTE_JOIN:
			/* Shouldn't happen; we're only considering baserels here. */
			Assert(false);
			return;

		case RTE_FUNCTION:
			/* Check for parallel-restricted functions. */
			if (!is_parallel_safe(root, (Node *) rte->functions))
				return;
			break;

		case RTE_TABLEFUNC:
			/* not parallel safe */
			return;

		case RTE_VALUES:
			/* Check for parallel-restricted functions. */
			if (!is_parallel_safe(root, (Node *) rte->values_lists))
				return;
			break;

		case RTE_CTE:

			/*
			 * CTE tuplestores aren't shared among parallel workers, so we
			 * force all CTE scans to happen in the leader.  Also, populating
			 * the CTE would require executing a subplan that's not available
			 * in the worker, might be parallel-restricted, and must get
			 * executed only once.
			 */
			return;

		case RTE_NAMEDTUPLESTORE:

			/*
			 * tuplestore cannot be shared, at least without more
			 * infrastructure to support that.
			 */
			return;

		case RTE_RESULT:
			/* RESULT RTEs, in themselves, are no problem. */
			break;

		case RTE_GROUP:
			/* Shouldn't happen; we're only considering baserels here. */
			Assert(false);
			return;
	}

	/*
	 * If there's anything in baserestrictinfo that's parallel-restricted, we
	 * give up on parallelizing access to this relation.  We could consider
	 * instead postponing application of the restricted quals until we're
	 * above all the parallelism in the plan tree, but it's not clear that
	 * that would be a win in very many cases, and it might be tricky to make
	 * outer join clauses work correctly.  It would likely break equivalence
	 * classes, too.
	 */
	if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
		return;

	/*
	 * Likewise, if the relation's outputs are not parallel-safe, give up.
	 * (Usually, they're just Vars, but sometimes they're not.)
	 */
	if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
		return;

	/* We have a winner. */
	rel->consider_parallel = true;
}
/*
 * set_plain_rel_pathlist
 *	  Build access paths for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	Relids		required_outer;

	/*
	 * We don't support pushing join clauses into the quals of a seqscan, but
	 * it could still have required parameterization due to LATERAL refs in
	 * its tlist.
	 */
	required_outer = rel->lateral_relids;

	/*
	 * Consider TID scans.
	 *
	 * If create_tidscan_paths returns true, then a TID scan path is forced.
	 * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
	 * the executor can't handle any other type of path for such queries.
	 * Hence, we return without adding any other paths.
	 */
	if (create_tidscan_paths(root, rel))
		return;

	/* Consider sequential scan */
	add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

	/* If appropriate, consider parallel sequential scan */
	if (rel->consider_parallel && required_outer == NULL)
		create_plain_partial_paths(root, rel);

	/* Consider index scans */
	create_index_paths(root, rel);
}
/*
 * create_plain_partial_paths
 *	  Build partial access paths for parallel scan of a plain relation
 */
static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{
	int			nworkers;

	nworkers = compute_parallel_worker(rel, rel->pages, -1,
									   max_parallel_workers_per_gather);

	/*
	 * A non-positive worker count means some limit was set to zero, i.e. the
	 * user doesn't want a parallel scan; generate nothing in that case.
	 */
	if (nworkers <= 0)
		return;

	/* Add an unordered partial path based on a parallel sequential scan. */
	add_partial_path(rel, create_seqscan_path(root, rel, NULL, nworkers));
}
/*
 * set_tablesample_rel_size
 *	  Set size estimates for a sampled relation
 */
static void
set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	TableSampleClause *tsc = rte->tablesample;
	TsmRoutine *tsm;
	BlockNumber pages;
	double		tuples;

	/*
	 * Test any partial indexes of rel for applicability.  We must do this
	 * first since partial unique indexes can affect size estimates.
	 */
	check_index_predicates(root, rel);

	/*
	 * Call the sampling method's estimation function to estimate the number
	 * of pages it will read and the number of tuples it will return.  (Note:
	 * we assume the function returns sane values.)
	 */
	tsm = GetTsmRoutine(tsc->tsmhandler);
	tsm->SampleScanGetSampleSize(root, rel, tsc->args,
								 &pages, &tuples);

	/*
	 * For the moment, because we will only consider a SampleScan path for the
	 * rel, it's okay to just overwrite the pages and tuples estimates for the
	 * whole relation.  If we ever consider multiple path types for sampled
	 * rels, we'll need more complication.
	 */
	rel->pages = pages;
	rel->tuples = tuples;

	/* Mark rel with estimated output rows, width, etc */
	set_baserel_size_estimates(root, rel);
}
/*
 * set_tablesample_rel_pathlist
 *	  Build access paths for a sampled relation
 */
static void
set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	Relids		required_outer;
	Path	   *path;

	/*
	 * We don't support pushing join clauses into the quals of a samplescan,
	 * but it could still have required parameterization due to LATERAL refs
	 * in its tlist or TABLESAMPLE arguments.
	 */
	required_outer = rel->lateral_relids;

	/* Consider sampled scan */
	path = create_samplescan_path(root, rel, required_outer);

	/*
	 * If the sampling method does not support repeatable scans, we must avoid
	 * plans that would scan the rel multiple times.  Ideally, we'd simply
	 * avoid putting the rel on the inside of a nestloop join; but adding such
	 * a consideration to the planner seems like a great deal of complication
	 * to support an uncommon usage of second-rate sampling methods.  Instead,
	 * if there is a risk that the query might perform an unsafe join, just
	 * wrap the SampleScan in a Materialize node.  We can check for joins by
	 * counting the membership of all_query_rels (note that this correctly
	 * counts inheritance trees as single rels).  If we're inside a subquery,
	 * we can't easily check whether a join might occur in the outer query, so
	 * just assume one is possible.
	 *
	 * GetTsmRoutine is relatively expensive compared to the other tests here,
	 * so check repeatable_across_scans last, even though that's a bit odd.
	 */
	if ((root->query_level > 1 ||
		 bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
		!(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
	{
		path = (Path *) create_material_path(rel, path, true);
	}

	add_path(rel, path);

	/* For the moment, at least, there are no other paths to consider */
}
/*
 * set_foreign_size
 *	  Set size estimates for a foreign table RTE
 *
 * We compute generic estimates first, then let the FDW override them, then
 * sanity-clamp what the FDW produced.
 */
static void
set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	/* Mark rel with estimated output rows, width, etc */
	set_foreign_size_estimates(root, rel);

	/* Let FDW adjust the size estimates, if it can */
	rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);

	/* ... but do not let it set the rows estimate to zero */
	rel->rows = clamp_row_est(rel->rows);

	/*
	 * Also, make sure rel->tuples is not insane relative to rel->rows.
	 * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
	 * FDW doesn't do anything to replace that.
	 */
	rel->tuples = Max(rel->tuples, rel->rows);
}
/*
 * set_foreign_pathlist
 *	  Build access paths for a foreign table RTE
 *
 * Path generation is delegated entirely to the FDW's callback.
 */
static void
set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	/* Call the FDW's GetForeignPaths function to generate path(s) */
	rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
}
/*
 * set_append_rel_size
 *	  Set size estimates for a simple "append relation"
 *
 * The passed-in rel and RTE represent the entire append relation.  The
 * relation's contents are computed by appending together the output of the
 * individual member relations.  Note that in the non-partitioned inheritance
 * case, the first member relation is actually the same table as is mentioned
 * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
 * a good thing because their outputs are not the same size.
 */
static void
set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
					Index rti, RangeTblEntry *rte)
{
	int			parentRTindex = rti;
	bool		has_live_children;	/* did any child survive exclusion? */
	double		parent_tuples;	/* sum of live children's tuples */
	double		parent_rows;	/* sum of live children's rows */
	double		parent_size;	/* row-weighted sum of child widths */
	double	   *parent_attrsizes;	/* per-attribute weighted width sums */
	int			nattrs;
	ListCell   *l;

	/* Guard against stack overflow due to overly deep inheritance tree. */
	check_stack_depth();

	Assert(IS_SIMPLE_REL(rel));

	/*
	 * If this is a partitioned baserel, set the consider_partitionwise_join
	 * flag; currently, we only consider partitionwise joins with the baserel
	 * if its targetlist doesn't contain a whole-row Var.
	 */
	if (enable_partitionwise_join &&
		rel->reloptkind == RELOPT_BASEREL &&
		rte->relkind == RELKIND_PARTITIONED_TABLE &&
		bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
		rel->consider_partitionwise_join = true;

	/*
	 * Initialize to compute size estimates for whole append relation.
	 *
	 * We handle tuples estimates by setting "tuples" to the total number of
	 * tuples accumulated from each live child, rather than using "rows".
	 * Although an appendrel itself doesn't directly enforce any quals, its
	 * child relations may.  Therefore, setting "tuples" equal to "rows" for
	 * an appendrel isn't always appropriate, and can lead to inaccurate cost
	 * estimates.  For example, when estimating the number of distinct values
	 * from an appendrel, we would be unable to adjust the estimate based on
	 * the restriction selectivity (see estimate_num_groups).
	 *
	 * We handle width estimates by weighting the widths of different child
	 * rels proportionally to their number of rows.  This is sensible because
	 * the use of width estimates is mainly to compute the total relation
	 * "footprint" if we have to sort or hash it.  To do this, we sum the
	 * total equivalent size (in "double" arithmetic) and then divide by the
	 * total rowcount estimate.  This is done separately for the total rel
	 * width and each attribute.
	 *
	 * Note: if you consider changing this logic, beware that child rels could
	 * have zero rows and/or width, if they were excluded by constraints.
	 */
	has_live_children = false;
	parent_tuples = 0;
	parent_rows = 0;
	parent_size = 0;
	nattrs = rel->max_attr - rel->min_attr + 1;
	parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));

	foreach(l, root->append_rel_list)
	{
		AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
		int			childRTindex;
		RangeTblEntry *childRTE;
		RelOptInfo *childrel;
		List	   *childrinfos;
		ListCell   *parentvars;
		ListCell   *childvars;
		ListCell   *lc;

		/* append_rel_list contains all append rels; ignore others */
		if (appinfo->parent_relid != parentRTindex)
			continue;

		childRTindex = appinfo->child_relid;
		childRTE = root->simple_rte_array[childRTindex];

		/*
		 * The child rel's RelOptInfo was already created during
		 * add_other_rels_to_query.
		 */
		childrel = find_base_rel(root, childRTindex);
		Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);

		/* We may have already proven the child to be dummy. */
		if (IS_DUMMY_REL(childrel))
			continue;

		/*
		 * We have to copy the parent's targetlist and quals to the child,
		 * with appropriate substitution of variables.  However, the
		 * baserestrictinfo quals were already copied/substituted when the
		 * child RelOptInfo was built.  So we don't need any additional setup
		 * before applying constraint exclusion.
		 */
		if (relation_excluded_by_constraints(root, childrel, childRTE))
		{
			/*
			 * This child need not be scanned, so we can omit it from the
			 * appendrel.
			 */
			set_dummy_rel_pathlist(childrel);
			continue;
		}

		/*
		 * Constraint exclusion failed, so copy the parent's join quals and
		 * targetlist to the child, with appropriate variable substitutions.
		 *
		 * We skip join quals that came from above outer joins that can null
		 * this rel, since they would be of no value while generating paths
		 * for the child.  This saves some effort while processing the child
		 * rel, and it also avoids an implementation restriction in
		 * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
		 */
		childrinfos = NIL;
		foreach(lc, rel->joininfo)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);

			if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
				childrinfos = lappend(childrinfos,
									  adjust_appendrel_attrs(root,
															 (Node *) rinfo,
															 1, &appinfo));
		}
		childrel->joininfo = childrinfos;

		/*
		 * Now for the child's targetlist.
		 *
		 * NB: the resulting childrel->reltarget->exprs may contain arbitrary
		 * expressions, which otherwise would not occur in a rel's targetlist.
		 * Code that might be looking at an appendrel child must cope with
		 * such.  (Normally, a rel's targetlist would only include Vars and
		 * PlaceHolderVars.)  XXX we do not bother to update the cost or width
		 * fields of childrel->reltarget; not clear if that would be useful.
		 */
		childrel->reltarget->exprs = (List *)
			adjust_appendrel_attrs(root,
								   (Node *) rel->reltarget->exprs,
								   1, &appinfo);

		/*
		 * We have to make child entries in the EquivalenceClass data
		 * structures as well.  This is needed either if the parent
		 * participates in some eclass joins (because we will want to consider
		 * inner-indexscan joins on the individual children) or if the parent
		 * has useful pathkeys (because we should try to build MergeAppend
		 * paths that produce those sort orderings).
		 */
		if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
			add_child_rel_equivalences(root, appinfo, rel, childrel);
		childrel->has_eclass_joins = rel->has_eclass_joins;

		/*
		 * Note: we could compute appropriate attr_needed data for the child's
		 * variables, by transforming the parent's attr_needed through the
		 * translated_vars mapping.  However, currently there's no need
		 * because attr_needed is only examined for base relations not
		 * otherrels.  So we just leave the child's attr_needed empty.
		 */

		/*
		 * If we consider partitionwise joins with the parent rel, do the same
		 * for partitioned child rels.
		 *
		 * Note: here we abuse the consider_partitionwise_join flag by setting
		 * it for child rels that are not themselves partitioned.  We do so to
		 * tell try_partitionwise_join() that the child rel is sufficiently
		 * valid to be used as a per-partition input, even if it later gets
		 * proven to be dummy.  (It's not usable until we've set up the
		 * reltarget and EC entries, which we just did.)
		 */
		if (rel->consider_partitionwise_join)
			childrel->consider_partitionwise_join = true;

		/*
		 * If parallelism is allowable for this query in general, see whether
		 * it's allowable for this childrel in particular.  But if we've
		 * already decided the appendrel is not parallel-safe as a whole,
		 * there's no point in considering parallelism for this child.  For
		 * consistency, do this before calling set_rel_size() for the child.
		 */
		if (root->glob->parallelModeOK && rel->consider_parallel)
			set_rel_consider_parallel(root, childrel, childRTE);

		/*
		 * Compute the child's size.
		 */
		set_rel_size(root, childrel, childRTindex, childRTE);

		/*
		 * It is possible that constraint exclusion detected a contradiction
		 * within a child subquery, even though we didn't prove one above.  If
		 * so, we can skip this child.
		 */
		if (IS_DUMMY_REL(childrel))
			continue;

		/* We have at least one live child. */
		has_live_children = true;

		/*
		 * If any live child is not parallel-safe, treat the whole appendrel
		 * as not parallel-safe.  In future we might be able to generate plans
		 * in which some children are farmed out to workers while others are
		 * not; but we don't have that today, so it's a waste to consider
		 * partial paths anywhere in the appendrel unless it's all safe.
		 * (Child rels visited before this one will be unmarked in
		 * set_append_rel_pathlist().)
		 */
		if (!childrel->consider_parallel)
			rel->consider_parallel = false;

		/*
		 * Accumulate size information from each live child.
		 */
		Assert(childrel->rows > 0);

		parent_tuples += childrel->tuples;
		parent_rows += childrel->rows;
		parent_size += childrel->reltarget->width * childrel->rows;

		/*
		 * Accumulate per-column estimates too.  We need not do anything for
		 * PlaceHolderVars in the parent list.  If child expression isn't a
		 * Var, or we didn't record a width estimate for it, we have to fall
		 * back on a datatype-based estimate.
		 *
		 * By construction, child's targetlist is 1-to-1 with parent's.
		 */
		forboth(parentvars, rel->reltarget->exprs,
				childvars, childrel->reltarget->exprs)
		{
			Var		   *parentvar = (Var *) lfirst(parentvars);
			Node	   *childvar = (Node *) lfirst(childvars);

			if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
			{
				int			pndx = parentvar->varattno - rel->min_attr;
				int32		child_width = 0;

				if (IsA(childvar, Var) &&
					((Var *) childvar)->varno == childrel->relid)
				{
					int			cndx = ((Var *) childvar)->varattno - childrel->min_attr;

					child_width = childrel->attr_widths[cndx];
				}
				if (child_width <= 0)
					child_width = get_typavgwidth(exprType(childvar),
												  exprTypmod(childvar));
				Assert(child_width > 0);

				parent_attrsizes[pndx] += child_width * childrel->rows;
			}
		}
	}

	if (has_live_children)
	{
		/*
		 * Save the finished size estimates.
		 */
		int			i;

		Assert(parent_rows > 0);
		rel->tuples = parent_tuples;
		rel->rows = parent_rows;
		rel->reltarget->width = rint(parent_size / parent_rows);
		for (i = 0; i < nattrs; i++)
			rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);

		/*
		 * Note that we leave rel->pages as zero; this is important to avoid
		 * double-counting the appendrel tree in total_table_pages.
		 */
	}
	else
	{
		/*
		 * All children were excluded by constraints, so mark the whole
		 * appendrel dummy.  We must do this in this phase so that the rel's
		 * dummy-ness is visible when we generate paths for other rels.
		 */
		set_dummy_rel_pathlist(rel);
	}

	pfree(parent_attrsizes);
}
/*
 * set_append_rel_pathlist
 *	  Build access paths for an "append relation"
 */
static void
set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
						Index rti, RangeTblEntry *rte)
{
	List	   *live_childrels = NIL;
	ListCell   *lc;

	/*
	 * Walk the global append_rel_list, generating access paths for every
	 * member relation of this appendrel and remembering the non-dummy ones.
	 */
	foreach(lc, root->append_rel_list)
	{
		AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
		RelOptInfo *child_rel;
		RangeTblEntry *child_rte;
		Index		child_rti;

		/* append_rel_list covers all appendrels; skip other parents */
		if (appinfo->parent_relid != rti)
			continue;

		/* Re-locate the child RTE and RelOptInfo */
		child_rti = appinfo->child_relid;
		child_rte = root->simple_rte_array[child_rti];
		child_rel = root->simple_rel_array[child_rti];

		/*
		 * If set_append_rel_size() decided the parent appendrel was
		 * parallel-unsafe at some point after visiting this child rel,
		 * propagate that marking down to the child so that we don't generate
		 * useless partial paths for it.
		 */
		if (!rel->consider_parallel)
			child_rel->consider_parallel = false;

		/* Compute the child's access paths. */
		set_rel_pathlist(root, child_rel, child_rti, child_rte);

		/* Dummy children contribute nothing; collect only the live ones. */
		if (!IS_DUMMY_REL(child_rel))
			live_childrels = lappend(live_childrels, child_rel);
	}

	/* Add paths to the append relation. */
	add_paths_to_append_rel(root, rel, live_childrels);
}
/*
 * set_grouped_rel_pathlist
 *	  If a grouped relation for the given 'rel' exists, build partial
 *	  aggregation paths for it.
 */
static void
set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
{
	RelOptInfo *grel;

	/*
	 * Eager aggregation needs both aggregate expressions and grouping
	 * expressions; with either list empty there is nothing to do.
	 */
	if (root->agg_clause_list == NIL || root->group_expr_list == NIL)
		return;

	/* Nothing to do either if no grouped base relation was created. */
	grel = rel->grouped_rel;
	if (grel == NULL)
		return;

	Assert(IS_GROUPED_REL(grel));

	/* Generate paths for the grouped base relation and pick the cheapest. */
	generate_grouped_paths(root, grel, rel);
	set_cheapest(grel);
}
/*
 * add_paths_to_append_rel
 *		Generate paths for the given append relation given the set of non-dummy
 *		child rels.
 *
 * The function collects all parameterizations and orderings supported by the
 * non-dummy children. For every such parameterization or ordering, it creates
 * an append path collecting one path from each non-dummy child with given
 * parameterization or ordering. Similarly it collects partial paths from
 * non-dummy children to create partial append paths.
 *
 * NOTE(review): the only code change here restores the "&" address-of
 * operators that had been corrupted by HTML-entity mangling ("&para..." had
 * become a pilcrow character); behavior is otherwise unchanged.
 */
void
add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
						List *live_childrels)
{
	AppendPathInput unparameterized = {0};
	AppendPathInput startup = {0};
	AppendPathInput partial_only = {0};
	AppendPathInput parallel_append = {0};
	bool		unparameterized_valid = true;
	bool		startup_valid = true;
	bool		partial_only_valid = true;
	bool		parallel_append_valid = true;
	List	   *all_child_pathkeys = NIL;
	List	   *all_child_outers = NIL;
	ListCell   *l;
	double		partial_rows = -1;

	/* If appropriate, consider parallel append */
	parallel_append_valid = enable_parallel_append && rel->consider_parallel;

	/*
	 * For every non-dummy child, remember the cheapest path.  Also, identify
	 * all pathkeys (orderings) and parameterizations (required_outer sets)
	 * available for the non-dummy member relations.
	 */
	foreach(l, live_childrels)
	{
		RelOptInfo *childrel = lfirst(l);
		ListCell   *lcp;
		Path	   *cheapest_partial_path = NULL;

		/*
		 * If child has an unparameterized cheapest-total path, add that to
		 * the unparameterized Append path we are constructing for the parent.
		 * If not, there's no workable unparameterized path.
		 *
		 * With partitionwise aggregates, the child rel's pathlist may be
		 * empty, so don't assume that a path exists here.
		 */
		if (childrel->pathlist != NIL &&
			childrel->cheapest_total_path->param_info == NULL)
			accumulate_append_subpath(childrel->cheapest_total_path,
									  &unparameterized.subpaths, NULL,
									  &unparameterized.child_append_relid_sets);
		else
			unparameterized_valid = false;

		/*
		 * When the planner is considering cheap startup plans, we'll also
		 * collect all the cheapest_startup_paths (if set) and build an
		 * AppendPath containing those as subpaths.
		 */
		if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
		{
			Path	   *cheapest_path;

			/*
			 * With an indication of how many tuples the query should provide,
			 * the optimizer tries to choose the path optimal for that
			 * specific number of tuples.
			 */
			if (root->tuple_fraction > 0.0)
				cheapest_path =
					get_cheapest_fractional_path(childrel,
												 root->tuple_fraction);
			else
				cheapest_path = childrel->cheapest_startup_path;

			/* cheapest_startup_path must not be a parameterized path. */
			Assert(cheapest_path->param_info == NULL);
			accumulate_append_subpath(cheapest_path,
									  &startup.subpaths,
									  NULL,
									  &startup.child_append_relid_sets);
		}
		else
			startup_valid = false;

		/* Same idea, but for a partial plan. */
		if (childrel->partial_pathlist != NIL)
		{
			cheapest_partial_path = linitial(childrel->partial_pathlist);
			accumulate_append_subpath(cheapest_partial_path,
									  &partial_only.partial_subpaths, NULL,
									  &partial_only.child_append_relid_sets);
		}
		else
			partial_only_valid = false;

		/*
		 * Same idea, but for a parallel append mixing partial and non-partial
		 * paths.
		 */
		if (parallel_append_valid)
		{
			Path	   *nppath = NULL;

			nppath =
				get_cheapest_parallel_safe_total_inner(childrel->pathlist);

			if (cheapest_partial_path == NULL && nppath == NULL)
			{
				/* Neither a partial nor a parallel-safe path?  Forget it. */
				parallel_append_valid = false;
			}
			else if (nppath == NULL ||
					 (cheapest_partial_path != NULL &&
					  cheapest_partial_path->total_cost < nppath->total_cost))
			{
				/* Partial path is cheaper or the only option. */
				Assert(cheapest_partial_path != NULL);
				accumulate_append_subpath(cheapest_partial_path,
										  &parallel_append.partial_subpaths,
										  &parallel_append.subpaths,
										  &parallel_append.child_append_relid_sets);
			}
			else
			{
				/*
				 * Either we've got only a non-partial path, or we think that
				 * a single backend can execute the best non-partial path
				 * faster than all the parallel backends working together can
				 * execute the best partial path.
				 *
				 * It might make sense to be more aggressive here.  Even if
				 * the best non-partial path is more expensive than the best
				 * partial path, it could still be better to choose the
				 * non-partial path if there are several such paths that can
				 * be given to different workers.  For now, we don't try to
				 * figure that out.
				 */
				accumulate_append_subpath(nppath,
										  &parallel_append.subpaths,
										  NULL,
										  &parallel_append.child_append_relid_sets);
			}
		}

		/*
		 * Collect lists of all the available path orderings and
		 * parameterizations for all the children.  We use these as a
		 * heuristic to indicate which sort orderings and parameterizations we
		 * should build Append and MergeAppend paths for.
		 */
		foreach(lcp, childrel->pathlist)
		{
			Path	   *childpath = (Path *) lfirst(lcp);
			List	   *childkeys = childpath->pathkeys;
			Relids		childouter = PATH_REQ_OUTER(childpath);

			/* Unsorted paths don't contribute to pathkey list */
			if (childkeys != NIL)
			{
				ListCell   *lpk;
				bool		found = false;

				/* Have we already seen this ordering? */
				foreach(lpk, all_child_pathkeys)
				{
					List	   *existing_pathkeys = (List *) lfirst(lpk);

					if (compare_pathkeys(existing_pathkeys,
										 childkeys) == PATHKEYS_EQUAL)
					{
						found = true;
						break;
					}
				}
				if (!found)
				{
					/* No, so add it to all_child_pathkeys */
					all_child_pathkeys = lappend(all_child_pathkeys,
												 childkeys);
				}
			}

			/* Unparameterized paths don't contribute to param-set list */
			if (childouter)
			{
				ListCell   *lco;
				bool		found = false;

				/* Have we already seen this param set? */
				foreach(lco, all_child_outers)
				{
					Relids		existing_outers = (Relids) lfirst(lco);

					if (bms_equal(existing_outers, childouter))
					{
						found = true;
						break;
					}
				}
				if (!found)
				{
					/* No, so add it to all_child_outers */
					all_child_outers = lappend(all_child_outers,
											   childouter);
				}
			}
		}
	}

	/*
	 * If we found unparameterized paths for all children, build an unordered,
	 * unparameterized Append path for the rel.  (Note: this is correct even
	 * if we have zero or one live subpath due to constraint exclusion.)
	 */
	if (unparameterized_valid)
		add_path(rel, (Path *) create_append_path(root, rel, unparameterized,
												  NIL, NULL, 0, false,
												  -1));

	/* build an AppendPath for the cheap startup paths, if valid */
	if (startup_valid)
		add_path(rel, (Path *) create_append_path(root, rel, startup,
												  NIL, NULL, 0, false, -1));

	/*
	 * Consider an append of unordered, unparameterized partial paths.  Make
	 * it parallel-aware if possible.
	 */
	if (partial_only_valid && partial_only.partial_subpaths != NIL)
	{
		AppendPath *appendpath;
		ListCell   *lc;
		int			parallel_workers = 0;

		/* Find the highest number of workers requested for any subpath. */
		foreach(lc, partial_only.partial_subpaths)
		{
			Path	   *path = lfirst(lc);

			parallel_workers = Max(parallel_workers, path->parallel_workers);
		}
		Assert(parallel_workers > 0);

		/*
		 * If the use of parallel append is permitted, always request at least
		 * log2(# of children) workers.  We assume it can be useful to have
		 * extra workers in this case because they will be spread out across
		 * the children.  The precise formula is just a guess, but we don't
		 * want to end up with a radically different answer for a table with N
		 * partitions vs. an unpartitioned table with the same data, so the
		 * use of some kind of log-scaling here seems to make some sense.
		 */
		if (enable_parallel_append)
		{
			parallel_workers = Max(parallel_workers,
								   pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
			parallel_workers = Min(parallel_workers,
								   max_parallel_workers_per_gather);
		}
		Assert(parallel_workers > 0);

		/* Generate a partial append path. */
		appendpath = create_append_path(root, rel, partial_only,
										NIL, NULL, parallel_workers,
										enable_parallel_append,
										-1);

		/*
		 * Make sure any subsequent partial paths use the same row count
		 * estimate.
		 */
		partial_rows = appendpath->path.rows;

		/* Add the path. */
		add_partial_path(rel, (Path *) appendpath);
	}

	/*
	 * Consider a parallel-aware append using a mix of partial and non-partial
	 * paths.  (This only makes sense if there's at least one child which has
	 * a non-partial path that is substantially cheaper than any partial path;
	 * otherwise, we should use the append path added in the previous step.)
	 */
	if (parallel_append_valid && parallel_append.subpaths != NIL)
	{
		AppendPath *appendpath;
		ListCell   *lc;
		int			parallel_workers = 0;

		/*
		 * Find the highest number of workers requested for any partial
		 * subpath.
		 */
		foreach(lc, parallel_append.partial_subpaths)
		{
			Path	   *path = lfirst(lc);

			parallel_workers = Max(parallel_workers, path->parallel_workers);
		}

		/*
		 * Same formula here as above.  It's even more important in this
		 * instance because the non-partial paths won't contribute anything to
		 * the planned number of parallel workers.
		 */
		parallel_workers = Max(parallel_workers,
							   pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
		parallel_workers = Min(parallel_workers,
							   max_parallel_workers_per_gather);
		Assert(parallel_workers > 0);

		appendpath = create_append_path(root, rel, parallel_append,
										NIL, NULL, parallel_workers, true,
										partial_rows);
		add_partial_path(rel, (Path *) appendpath);
	}

	/*
	 * Also build unparameterized ordered append paths based on the collected
	 * list of child pathkeys.
	 */
	if (unparameterized_valid)
		generate_orderedappend_paths(root, rel, live_childrels,
									 all_child_pathkeys);

	/*
	 * Build Append paths for each parameterization seen among the child rels.
	 * (This may look pretty expensive, but in most cases of practical
	 * interest, the child rels will expose mostly the same parameterizations,
	 * so that not that many cases actually get considered here.)
	 *
	 * The Append node itself cannot enforce quals, so all qual checking must
	 * be done in the child paths.  This means that to have a parameterized
	 * Append path, we must have the exact same parameterization for each
	 * child path; otherwise some children might be failing to check the
	 * moved-down quals.  To make them match up, we can try to increase the
	 * parameterization of lesser-parameterized paths.
	 */
	foreach(l, all_child_outers)
	{
		Relids		required_outer = (Relids) lfirst(l);
		ListCell   *lcr;
		AppendPathInput parameterized = {0};
		bool		parameterized_valid = true;

		/* Select the child paths for an Append with this parameterization */
		foreach(lcr, live_childrels)
		{
			RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
			Path	   *subpath;

			if (childrel->pathlist == NIL)
			{
				/* failed to make a suitable path for this child */
				parameterized_valid = false;
				break;
			}

			subpath = get_cheapest_parameterized_child_path(root,
															childrel,
															required_outer);
			if (subpath == NULL)
			{
				/* failed to make a suitable path for this child */
				parameterized_valid = false;
				break;
			}
			accumulate_append_subpath(subpath, &parameterized.subpaths, NULL,
									  &parameterized.child_append_relid_sets);
		}

		if (parameterized_valid)
			add_path(rel, (Path *)
					 create_append_path(root, rel, parameterized,
										NIL, required_outer, 0, false,
										-1));
	}

	/*
	 * When there is only a single child relation, the Append path can inherit
	 * any ordering available for the child rel's path, so that it's useful to
	 * consider ordered partial paths.  Above we only considered the cheapest
	 * partial path for each child, but let's also make paths using any
	 * partial paths that have pathkeys.
	 */
	if (list_length(live_childrels) == 1)
	{
		RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);

		/* skip the cheapest partial path, since we already used that above */
		for_each_from(l, childrel->partial_pathlist, 1)
		{
			Path	   *path = (Path *) lfirst(l);
			AppendPath *appendpath;
			AppendPathInput append = {0};

			/* skip paths with no pathkeys. */
			if (path->pathkeys == NIL)
				continue;

			append.partial_subpaths = list_make1(path);
			appendpath = create_append_path(root, rel, append, NIL, NULL,
											path->parallel_workers, true,
											partial_rows);
			add_partial_path(rel, (Path *) appendpath);
		}
	}
}
/*
 * generate_orderedappend_paths
 *		Generate ordered append paths for an append relation
 *
 * Usually we generate MergeAppend paths here, but there are some special
 * cases where we can generate simple Append paths, because the subpaths
 * can provide tuples in the required order already.
 *
 * We generate a path for each ordering (pathkey list) appearing in
 * all_child_pathkeys.
 *
 * We consider the cheapest-startup and cheapest-total cases, and also the
 * cheapest-fractional case when not all tuples need to be retrieved. For each
 * interesting ordering, we collect all the cheapest startup subpaths, all the
 * cheapest total paths, and, if applicable, all the cheapest fractional paths,
 * and build a suitable path for each case.
 *
 * We don't currently generate any parameterized ordered paths here.  While
 * it would not take much more code here to do so, it's very unclear that it
 * is worth the planning cycles to investigate such paths: there's little
 * use for an ordered path on the inside of a nestloop.  In fact, it's likely
 * that the current coding of add_path would reject such paths out of hand,
 * because add_path gives no credit for sort ordering of parameterized paths,
 * and a parameterized MergeAppend is going to be more expensive than the
 * corresponding parameterized Append path.  If we ever try harder to support
 * parameterized mergejoin plans, it might be worth adding support for
 * parameterized paths here to feed such joins.  (See notes in
 * optimizer/README for why that might not ever happen, though.)
 */
static void
generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
							 List *live_childrels,
							 List *all_child_pathkeys)
{
	ListCell   *lcp;
	List	   *partition_pathkeys = NIL;
	List	   *partition_pathkeys_desc = NIL;
	bool		partition_pathkeys_partial = true;
	bool		partition_pathkeys_desc_partial = true;

	/*
	 * Some partitioned table setups may allow us to use an Append node
	 * instead of a MergeAppend.  This is possible in cases such as RANGE
	 * partitioned tables where it's guaranteed that an earlier partition must
	 * contain rows which come earlier in the sort order.  To detect whether
	 * this is relevant, build pathkey descriptions of the partition ordering,
	 * for both forward and reverse scans.
	 */
	if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
		partitions_are_ordered(rel->boundinfo, rel->live_parts))
	{
		partition_pathkeys = build_partition_pathkeys(root, rel,
													  ForwardScanDirection,
													  &partition_pathkeys_partial);

		partition_pathkeys_desc = build_partition_pathkeys(root, rel,
														   BackwardScanDirection,
														   &partition_pathkeys_desc_partial);

		/*
		 * You might think we should truncate_useless_pathkeys here, but
		 * allowing partition keys which are a subset of the query's pathkeys
		 * can often be useful.  For example, consider a table partitioned by
		 * RANGE (a, b), and a query with ORDER BY a, b, c.  If we have child
		 * paths that can produce the a, b, c ordering (perhaps via indexes on
		 * (a, b, c)) then it works to consider the appendrel output as
		 * ordered by a, b, c.
		 */
	}

	/* Now consider each interesting sort ordering */
	foreach(lcp, all_child_pathkeys)
	{
		List	   *pathkeys = (List *) lfirst(lcp);
		/* Per-case accumulators of child subpaths + flattened relid sets */
		AppendPathInput startup = {0};
		AppendPathInput total = {0};
		AppendPathInput fractional = {0};
		bool		startup_neq_total = false;
		bool		fraction_neq_total = false;
		bool		match_partition_order;
		bool		match_partition_order_desc;
		int			end_index;
		int			first_index;
		int			direction;

		/*
		 * Determine if this sort ordering matches any partition pathkeys we
		 * have, for both ascending and descending partition order.  If the
		 * partition pathkeys happen to be contained in pathkeys then it still
		 * works, as described above, providing that the partition pathkeys
		 * are complete and not just a prefix of the partition keys.  (In such
		 * cases we'll be relying on the child paths to have sorted the
		 * lower-order columns of the required pathkeys.)
		 */
		match_partition_order =
			pathkeys_contained_in(pathkeys, partition_pathkeys) ||
			(!partition_pathkeys_partial &&
			 pathkeys_contained_in(partition_pathkeys, pathkeys));

		match_partition_order_desc = !match_partition_order &&
			(pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
			 (!partition_pathkeys_desc_partial &&
			  pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));

		/*
		 * When the required pathkeys match the reverse of the partition
		 * order, we must build the list of paths in reverse starting with the
		 * last matching partition first.  We can get away without making any
		 * special cases for this in the loop below by just looping backward
		 * over the child relations in this case.
		 */
		if (match_partition_order_desc)
		{
			/* loop backward */
			first_index = list_length(live_childrels) - 1;
			end_index = -1;
			direction = -1;

			/*
			 * Set this to true to save us having to check for
			 * match_partition_order_desc in the loop below.
			 */
			match_partition_order = true;
		}
		else
		{
			/* for all other case, loop forward */
			first_index = 0;
			end_index = list_length(live_childrels);
			direction = 1;
		}

		/*
		 * Select the child paths for this ordering...  (i walks from
		 * first_index up to, but excluding, end_index, stepping by
		 * direction: +1 forward, -1 backward.)
		 */
		for (int i = first_index; i != end_index; i += direction)
		{
			RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
			Path	   *cheapest_startup,
					   *cheapest_total,
					   *cheapest_fractional = NULL;

			/* Locate the right paths, if they are available. */
			cheapest_startup =
				get_cheapest_path_for_pathkeys(childrel->pathlist,
											   pathkeys,
											   NULL,
											   STARTUP_COST,
											   false);
			cheapest_total =
				get_cheapest_path_for_pathkeys(childrel->pathlist,
											   pathkeys,
											   NULL,
											   TOTAL_COST,
											   false);

			/*
			 * If we can't find any paths with the right order just use the
			 * cheapest-total path; we'll have to sort it later.
			 */
			if (cheapest_startup == NULL || cheapest_total == NULL)
			{
				cheapest_startup = cheapest_total =
					childrel->cheapest_total_path;
				/* Assert we do have an unparameterized path for this child */
				Assert(cheapest_total->param_info == NULL);
			}

			/*
			 * When building a fractional path, determine a cheapest
			 * fractional path for each child relation too. Looking at startup
			 * and total costs is not enough, because the cheapest fractional
			 * path may be dominated by two separate paths (one for startup,
			 * one for total).
			 *
			 * When needed (building fractional path), determine the cheapest
			 * fractional path too.
			 */
			if (root->tuple_fraction > 0)
			{
				double		path_fraction = root->tuple_fraction;

				/*
				 * We should not have a dummy child relation here.  However,
				 * we cannot use childrel->rows to compute the tuple fraction,
				 * as childrel can be an upper relation with an unset row
				 * estimate.  Instead, we use the row estimate from the
				 * cheapest_total path, which should already have been forced
				 * to a sane value.
				 */
				Assert(cheapest_total->rows > 0);

				/* Convert absolute limit to a path fraction */
				if (path_fraction >= 1.0)
					path_fraction /= cheapest_total->rows;

				cheapest_fractional =
					get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
															  pathkeys,
															  NULL,
															  path_fraction);

				/*
				 * If we found no path with matching pathkeys, use the
				 * cheapest total path instead.
				 *
				 * XXX We might consider partially sorted paths too (with an
				 * incremental sort on top). But we'd have to build all the
				 * incremental paths, do the costing etc.
				 *
				 * Also, notice whether we actually have different paths for
				 * the "fractional" and "total" cases.  This helps avoid
				 * generating two identical ordered append paths.
				 */
				if (cheapest_fractional == NULL)
					cheapest_fractional = cheapest_total;
				else if (cheapest_fractional != cheapest_total)
					fraction_neq_total = true;
			}

			/*
			 * Notice whether we actually have different paths for the
			 * "cheapest" and "total" cases.  This helps avoid generating two
			 * identical ordered append paths.
			 */
			if (cheapest_startup != cheapest_total)
				startup_neq_total = true;

			/*
			 * Collect the appropriate child paths.  The required logic varies
			 * for the Append and MergeAppend cases.
			 */
			if (match_partition_order)
			{
				/*
				 * We're going to make a plain Append path.  We don't need
				 * most of what accumulate_append_subpath would do, but we do
				 * want to cut out child Appends or MergeAppends if they have
				 * just a single subpath (and hence aren't doing anything
				 * useful).
				 */
				cheapest_startup =
					get_singleton_append_subpath(cheapest_startup,
												 &startup.child_append_relid_sets);
				cheapest_total =
					get_singleton_append_subpath(cheapest_total,
												 &total.child_append_relid_sets);

				startup.subpaths = lappend(startup.subpaths, cheapest_startup);
				total.subpaths = lappend(total.subpaths, cheapest_total);

				if (cheapest_fractional)
				{
					cheapest_fractional =
						get_singleton_append_subpath(cheapest_fractional,
													 &fractional.child_append_relid_sets);
					fractional.subpaths =
						lappend(fractional.subpaths, cheapest_fractional);
				}
			}
			else
			{
				/*
				 * Otherwise, rely on accumulate_append_subpath to collect the
				 * child paths for the MergeAppend.
				 */
				accumulate_append_subpath(cheapest_startup,
										  &startup.subpaths, NULL,
										  &startup.child_append_relid_sets);
				accumulate_append_subpath(cheapest_total,
										  &total.subpaths, NULL,
										  &total.child_append_relid_sets);
				if (cheapest_fractional)
					accumulate_append_subpath(cheapest_fractional,
											  &fractional.subpaths, NULL,
											  &fractional.child_append_relid_sets);
			}
		}

		/*
		 * ... and build the Append or MergeAppend paths.  The startup-path
		 * variant is always built; the total and fractional variants are
		 * added only when they would differ from it.
		 */
		if (match_partition_order)
		{
			/* We only need Append */
			add_path(rel, (Path *) create_append_path(root,
													  rel,
													  startup,
													  pathkeys,
													  NULL,
													  0,
													  false,
													  -1));
			if (startup_neq_total)
				add_path(rel, (Path *) create_append_path(root,
														  rel,
														  total,
														  pathkeys,
														  NULL,
														  0,
														  false,
														  -1));

			if (fractional.subpaths && fraction_neq_total)
				add_path(rel, (Path *) create_append_path(root,
														  rel,
														  fractional,
														  pathkeys,
														  NULL,
														  0,
														  false,
														  -1));
		}
		else
		{
			/* We need MergeAppend */
			add_path(rel, (Path *) create_merge_append_path(root,
															rel,
															startup.subpaths,
															startup.child_append_relid_sets,
															pathkeys,
															NULL));
			if (startup_neq_total)
				add_path(rel, (Path *) create_merge_append_path(root,
																rel,
																total.subpaths,
																total.child_append_relid_sets,
																pathkeys,
																NULL));

			if (fractional.subpaths && fraction_neq_total)
				add_path(rel, (Path *) create_merge_append_path(root,
																rel,
																fractional.subpaths,
																fractional.child_append_relid_sets,
																pathkeys,
																NULL));
		}
	}
}
/*
 * get_cheapest_parameterized_child_path
 *		Get cheapest path for this relation that has exactly the requested
 *		parameterization.
 *
 * Returns NULL if unable to create such a path.
 */
static Path *
get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
									  Relids required_outer)
{
	Path	   *best;
	ListCell   *lc;

	/*
	 * First, look up the cheapest existing path whose parameterization is no
	 * larger than what's needed.  An exact match means we're done.
	 */
	best = get_cheapest_path_for_pathkeys(rel->pathlist,
										  NIL,
										  required_outer,
										  TOTAL_COST,
										  false);
	Assert(best != NULL);
	if (bms_equal(PATH_REQ_OUTER(best), required_outer))
		return best;

	/*
	 * No exact match, so "reparameterize" existing paths to the requested
	 * parameterization, which effectively pushes additional joinquals down
	 * to be checked within the path's scan.  Some paths may already check
	 * some of those joinquals and others not, so it's unclear which one
	 * ends up cheapest after reparameterization; examine them all.
	 */
	best = NULL;
	foreach(lc, rel->pathlist)
	{
		Path	   *candidate = (Path *) lfirst(lc);

		/* Reject paths needing more than the requested parameterization */
		if (!bms_is_subset(PATH_REQ_OUTER(candidate), required_outer))
			continue;

		/*
		 * Reparameterization can only make a path more expensive, so skip
		 * any candidate already costlier than the current best.
		 */
		if (best != NULL &&
			compare_path_costs(best, candidate, TOTAL_COST) <= 0)
			continue;

		/* Reparameterize if needed, then recheck cost */
		if (!bms_equal(PATH_REQ_OUTER(candidate), required_outer))
		{
			candidate = reparameterize_path(root, candidate, required_outer, 1.0);
			if (candidate == NULL)
				continue;		/* failed to reparameterize this one */
			Assert(bms_equal(PATH_REQ_OUTER(candidate), required_outer));

			if (best != NULL &&
				compare_path_costs(best, candidate, TOTAL_COST) <= 0)
				continue;
		}

		/* We have a new best path */
		best = candidate;
	}

	/* Return the best path, or NULL if we found no suitable candidate */
	return best;
}
/*
 * accumulate_append_subpath
 *		Add a subpath to the list being built for an Append or MergeAppend.
 *
 * It's possible that the child is itself an Append or MergeAppend path, in
 * which case we can "cut out the middleman" and just add its child paths to
 * our own list.  (We don't try to do this earlier because we need to apply
 * both levels of transformation to the quals.)
 *
 * Note that if we omit a child MergeAppend in this way, we are effectively
 * omitting a sort step, which seems fine: if the parent is to be an Append,
 * its result would be unsorted anyway, while if the parent is to be a
 * MergeAppend, there's no point in a separate sort on a child.
 *
 * Normally, either path is a partial path and subpaths is a list of partial
 * paths, or else path is a non-partial plan and subpaths is a list of those.
 * However, if path is a parallel-aware Append, then we add its partial path
 * children to subpaths and the rest to special_subpaths.  If the latter is
 * NULL, we don't flatten the path at all (unless it contains only partial
 * paths).
 */
static void
accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths,
						  List **child_append_relid_sets)
{
	List	   *pulled_up_relid_sets = NIL;
	bool		flattened = false;

	if (IsA(path, AppendPath))
	{
		AppendPath *appath = (AppendPath *) path;

		if (!appath->path.parallel_aware || appath->first_partial_path == 0)
		{
			/* Plain (or all-partial) Append: pull up every subpath. */
			*subpaths = list_concat(*subpaths, appath->subpaths);
			pulled_up_relid_sets = appath->child_append_relid_sets;
			flattened = true;
		}
		else if (special_subpaths != NULL)
		{
			/* Split Parallel Append into partial and non-partial subpaths */
			*subpaths = list_concat(*subpaths,
									list_copy_tail(appath->subpaths,
												   appath->first_partial_path));
			*special_subpaths =
				list_concat(*special_subpaths,
							list_copy_head(appath->subpaths,
										   appath->first_partial_path));
			pulled_up_relid_sets = appath->child_append_relid_sets;
			flattened = true;
		}
	}
	else if (IsA(path, MergeAppendPath))
	{
		MergeAppendPath *mapath = (MergeAppendPath *) path;

		/* MergeAppend is always flattenable; the sort step is redundant. */
		*subpaths = list_concat(*subpaths, mapath->subpaths);
		pulled_up_relid_sets = mapath->child_append_relid_sets;
		flattened = true;
	}

	if (flattened)
	{
		/*
		 * Record the relids of the Append/MergeAppend we just cut out, plus
		 * any relid sets it had itself accumulated, so those relids don't
		 * disappear from the final plan.
		 */
		*child_append_relid_sets =
			lappend(*child_append_relid_sets, path->parent->relids);
		*child_append_relid_sets =
			list_concat(*child_append_relid_sets, pulled_up_relid_sets);
		return;
	}

	/* Not a flattenable Append/MergeAppend; just add the path itself. */
	*subpaths = lappend(*subpaths, path);
}
/*
 * get_singleton_append_subpath
 *		Returns the single subpath of an Append/MergeAppend, or just
 *		return 'path' if it's not a single sub-path Append/MergeAppend.
 *
 * As a side effect, whenever we return a single subpath rather than the
 * original path, add the relid sets for the original path to
 * child_append_relid_sets, so that those relids don't entirely disappear
 * from the final plan.
 *
 * Note: 'path' must not be a parallel-aware path.
 */
static Path *
get_singleton_append_subpath(Path *path, List **child_append_relid_sets)
{
	List	   *subpaths;
	List	   *sub_relid_sets;

	Assert(!path->parallel_aware);

	/* Extract the subpath list and accumulated relid sets, if applicable. */
	if (IsA(path, AppendPath))
	{
		AppendPath *appath = (AppendPath *) path;

		subpaths = appath->subpaths;
		sub_relid_sets = appath->child_append_relid_sets;
	}
	else if (IsA(path, MergeAppendPath))
	{
		MergeAppendPath *mapath = (MergeAppendPath *) path;

		subpaths = mapath->subpaths;
		sub_relid_sets = mapath->child_append_relid_sets;
	}
	else
		return path;			/* not an Append/MergeAppend at all */

	/* Only a single-subpath Append/MergeAppend can be cut out. */
	if (list_length(subpaths) != 1)
		return path;

	/* Remember the omitted node's relids before returning its lone child. */
	*child_append_relid_sets =
		lappend(*child_append_relid_sets, path->parent->relids);
	*child_append_relid_sets =
		list_concat(*child_append_relid_sets, sub_relid_sets);

	return (Path *) linitial(subpaths);
}
/*
 * set_dummy_rel_pathlist
 *	  Build a dummy path for a relation that's been excluded by constraints
 *
 * Rather than inventing a special "dummy" path type, we represent this as an
 * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
 *
 * (See also mark_dummy_rel, which does basically the same thing, but is
 * typically used to change a rel into dummy state after we already made
 * paths for it.)
 */
static void
set_dummy_rel_pathlist(RelOptInfo *rel)
{
	AppendPathInput no_children = {0};

	/* Dummy size estimates; attr_widths[] stays all-zero. */
	rel->rows = 0;
	rel->reltarget->width = 0;

	/* Throw away whatever paths were built already. */
	rel->pathlist = NIL;
	rel->partial_pathlist = NIL;

	/* A childless Append represents the dummy path. */
	add_path(rel, (Path *) create_append_path(NULL, rel, no_children,
											  NIL, rel->lateral_relids,
											  0, false, -1));

	/*
	 * Refresh the cheapest-path fields right away in case they were pointing
	 * at a discarded path.  set_rel_pathlist will redo this later, but doing
	 * it here is cheap and keeps us consistent with mark_dummy_rel.
	 */
	set_cheapest(rel);
}
/*
* find_window_run_conditions
* Determine if 'wfunc' is really a WindowFunc and call its prosupport
* function to determine the function's monotonic properties. We then
* see if 'opexpr' can be used to short-circuit execution.
*
* For example row_number() over (order by ...) always produces a value one
* higher than the previous. If someone has a window function in a subquery
* and has a WHERE clause in the outer query to filter rows <= 10, then we may
* as well stop processing the windowagg once the row number reaches 11. Here
* we check if 'opexpr' might help us to stop doing needless extra processing
* in WindowAgg nodes.
*
* '*keep_original' is set to true if the caller should also use 'opexpr' for
* its original purpose. This is set to false if the caller can assume that
* the run condition will handle all of the required filtering.
*
* Returns true if 'opexpr' was found to be useful and was added to the
* WindowFunc's runCondition. We also set *keep_original accordingly and add
* 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
* If the 'opexpr' cannot be used then we set *keep_original to true and
* return false.
*/
static bool
find_window_run_conditions(Query *subquery, AttrNumber attno,
						   WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left,
						   bool *keep_original, Bitmapset **run_cond_attrs)
{
	Oid			prosupport;
	Expr	   *otherexpr;
	SupportRequestWFuncMonotonic req;
	SupportRequestWFuncMonotonic *res;
	WindowClause *wclause;
	List	   *opinfos;
	OpExpr	   *runopexpr;
	Oid			runoperator;
	ListCell   *lc;

	/* Default: caller must keep using 'opexpr' for its original purpose */
	*keep_original = true;

	/* Strip any RelabelType wrappers to expose the underlying node */
	while (IsA(wfunc, RelabelType))
		wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;

	/* we can only work with window functions */
	if (!IsA(wfunc, WindowFunc))
		return false;

	/* can't use it if there are subplans in the WindowFunc */
	if (contain_subplans((Node *) wfunc))
		return false;

	prosupport = get_func_support(wfunc->winfnoid);

	/* Check if there's a support function for 'wfunc' */
	if (!OidIsValid(prosupport))
		return false;

	/* get the Expr from the other side of the OpExpr */
	if (wfunc_left)
		otherexpr = lsecond(opexpr->args);
	else
		otherexpr = linitial(opexpr->args);

	/*
	 * The value being compared must not change during the evaluation of the
	 * window partition.
	 */
	if (!is_pseudo_constant_clause((Node *) otherexpr))
		return false;

	/* find the window clause belonging to the window function */
	wclause = (WindowClause *) list_nth(subquery->windowClause,
										wfunc->winref - 1);

	/* Build the request describing this window function + clause pair */
	req.type = T_SupportRequestWFuncMonotonic;
	req.window_func = wfunc;
	req.window_clause = wclause;

	/* call the support function */
	res = (SupportRequestWFuncMonotonic *)
		DatumGetPointer(OidFunctionCall1(prosupport,
										 PointerGetDatum(&req)));

	/*
	 * Nothing to do if the function is neither monotonically increasing nor
	 * monotonically decreasing.
	 */
	if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
		return false;

	runopexpr = NULL;
	runoperator = InvalidOid;

	/*
	 * Examine each btree-like interpretation of the operator to decide
	 * whether, and in what form, it can serve as a run condition.
	 */
	opinfos = get_op_index_interpretation(opexpr->opno);

	foreach(lc, opinfos)
	{
		OpIndexInterpretation *opinfo = (OpIndexInterpretation *) lfirst(lc);
		CompareType cmptype = opinfo->cmptype;

		/* handle < / <= */
		if (cmptype == COMPARE_LT || cmptype == COMPARE_LE)
		{
			/*
			 * < / <= is supported for monotonically increasing functions in
			 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
			 * for monotonically decreasing functions.
			 */
			if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
				(!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
			{
				*keep_original = false;
				runopexpr = opexpr;
				runoperator = opexpr->opno;
			}
			break;
		}
		/* handle > / >= */
		else if (cmptype == COMPARE_GT || cmptype == COMPARE_GE)
		{
			/*
			 * > / >= is supported for monotonically decreasing functions in
			 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
			 * for monotonically increasing functions.
			 */
			if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
				(!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
			{
				*keep_original = false;
				runopexpr = opexpr;
				runoperator = opexpr->opno;
			}
			break;
		}
		/* handle = */
		else if (cmptype == COMPARE_EQ)
		{
			CompareType newcmptype;

			/*
			 * When both monotonically increasing and decreasing then the
			 * return value of the window function will be the same each time.
			 * We can simply use 'opexpr' as the run condition without
			 * modifying it.
			 */
			if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
			{
				*keep_original = false;
				runopexpr = opexpr;
				runoperator = opexpr->opno;
				break;
			}

			/*
			 * When monotonically increasing we make a qual with <wfunc> <=
			 * <value> or <value> >= <wfunc> in order to filter out values
			 * which are above the value in the equality condition.  For
			 * monotonically decreasing functions we want to filter values
			 * below the value in the equality condition.
			 */
			if (res->monotonic & MONOTONICFUNC_INCREASING)
				newcmptype = wfunc_left ? COMPARE_LE : COMPARE_GE;
			else
				newcmptype = wfunc_left ? COMPARE_GE : COMPARE_LE;

			/* We must keep the original equality qual */
			*keep_original = true;
			runopexpr = opexpr;

			/* determine the operator to use for the WindowFuncRunCondition */
			runoperator = get_opfamily_member_for_cmptype(opinfo->opfamily_id,
														  opinfo->oplefttype,
														  opinfo->oprighttype,
														  newcmptype);
			break;
		}
	}

	if (runopexpr != NULL)
	{
		WindowFuncRunCondition *wfuncrc;

		/* Attach a run condition node to the WindowFunc itself */
		wfuncrc = makeNode(WindowFuncRunCondition);
		wfuncrc->opno = runoperator;
		wfuncrc->inputcollid = runopexpr->inputcollid;
		wfuncrc->wfunc_left = wfunc_left;
		wfuncrc->arg = copyObject(otherexpr);

		wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);

		/* record that this attno was used in a run condition */
		*run_cond_attrs = bms_add_member(*run_cond_attrs,
										 attno - FirstLowInvalidHeapAttributeNumber);
		return true;
	}

	/* unsupported OpExpr */
	return false;
}
/*
* check_and_push_window_quals
* Check if 'clause' is a qual that can be pushed into a WindowFunc
* as a 'runCondition' qual. These, when present, allow some unnecessary
* work to be skipped during execution.
*
* 'run_cond_attrs' will be populated with all targetlist resnos of subquery
* targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
* window quals for.
*
* Returns true if the caller still must keep the original qual or false if
* the caller can safely ignore the original qual because the WindowAgg node
* will use the runCondition to stop returning tuples.
*/
static bool
check_and_push_window_quals(Query *subquery, Node *clause,
							Bitmapset **run_cond_attrs)
{
	OpExpr	   *opexpr = (OpExpr *) clause;
	bool		keep_original = true;
	Var		   *lvar;
	Var		   *rvar;

	/* Only binary OpExprs are usable here */
	if (!IsA(opexpr, OpExpr))
		return true;
	if (list_length(opexpr->args) != 2)
		return true;

	/*
	 * Restrict this optimization to strict OpExprs.  During execution, once
	 * the runcondition becomes false we stop evaluating WindowFuncs and set
	 * their stale result values to NULL; strictness guarantees that the
	 * top-level WindowAgg then filters out the NULL-bearing tuples.
	 */
	set_opfuncid(opexpr);
	if (!func_strict(opexpr->opfuncid))
		return true;

	/*
	 * Look on each side of the operator for a plain Var referencing a window
	 * function in the subquery, and ask find_window_run_conditions() whether
	 * 'opexpr' can become part of the run condition.
	 */

	/* left operand first */
	lvar = linitial(opexpr->args);
	if (IsA(lvar, Var) && lvar->varattno > 0)
	{
		TargetEntry *tle = list_nth(subquery->targetList, lvar->varattno - 1);

		if (find_window_run_conditions(subquery, tle->resno,
									   (WindowFunc *) tle->expr, opexpr,
									   true, &keep_original, run_cond_attrs))
			return keep_original;
	}

	/* then the right operand */
	rvar = lsecond(opexpr->args);
	if (IsA(rvar, Var) && rvar->varattno > 0)
	{
		TargetEntry *tle = list_nth(subquery->targetList, rvar->varattno - 1);

		if (find_window_run_conditions(subquery, tle->resno,
									   (WindowFunc *) tle->expr, opexpr,
									   false, &keep_original, run_cond_attrs))
			return keep_original;
	}

	return true;
}
/*
* set_subquery_pathlist
* Generate SubqueryScan access paths for a subquery RTE
*
* We don't currently support generating parameterized paths for subqueries
* by pushing join clauses down into them; it seems too expensive to re-plan
* the subquery multiple times to consider different alternatives.
* (XXX that could stand to be reconsidered, now that we use Paths.)
* So the paths made here will be parameterized if the subquery contains
* LATERAL references, otherwise not. As long as that's true, there's no need
* for a separate set_subquery_size phase: just make the paths right away.
*/
static void
set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
					  Index rti, RangeTblEntry *rte)
{
	Query	   *parse = root->parse;
	Query	   *subquery = rte->subquery;
	bool		trivial_pathtarget;
	Relids		required_outer;
	pushdown_safety_info safetyInfo;
	double		tuple_fraction;
	RelOptInfo *sub_final_rel;
	Bitmapset  *run_cond_attrs = NULL;
	ListCell   *lc;
	char	   *plan_name;

	/*
	 * Must copy the Query so that planning doesn't mess up the RTE contents
	 * (really really need to fix the planner to not scribble on its input,
	 * someday ... but see remove_unused_subquery_outputs to start with).
	 */
	subquery = copyObject(subquery);

	/*
	 * If it's a LATERAL subquery, it might contain some Vars of the current
	 * query level, requiring it to be treated as parameterized, even though
	 * we don't support pushing down join quals into subqueries.
	 */
	required_outer = rel->lateral_relids;

	/*
	 * Zero out result area for subquery_is_pushdown_safe, so that it can set
	 * flags as needed while recursing.  In particular, we need a workspace
	 * for keeping track of the reasons why columns are unsafe to reference.
	 * These reasons are stored in the bits inside unsafeFlags[i] when we
	 * discover reasons that column i of the subquery is unsafe to be used in
	 * a pushed-down qual.
	 */
	memset(&safetyInfo, 0, sizeof(safetyInfo));
	safetyInfo.unsafeFlags = (unsigned char *)
		palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));

	/*
	 * If the subquery has the "security_barrier" flag, it means the subquery
	 * originated from a view that must enforce row-level security.  Then we
	 * must not push down quals that contain leaky functions.  (Ideally this
	 * would be checked inside subquery_is_pushdown_safe, but since we don't
	 * currently pass the RTE to that function, we must do it here.)
	 */
	safetyInfo.unsafeLeaky = rte->security_barrier;

	/*
	 * If there are any restriction clauses that have been attached to the
	 * subquery relation, consider pushing them down to become WHERE or HAVING
	 * quals of the subquery itself.  This transformation is useful because it
	 * may allow us to generate a better plan for the subquery than evaluating
	 * all the subquery output rows and then filtering them.
	 *
	 * There are several cases where we cannot push down clauses. Restrictions
	 * involving the subquery are checked by subquery_is_pushdown_safe().
	 * Restrictions on individual clauses are checked by
	 * qual_is_pushdown_safe().  Also, we don't want to push down
	 * pseudoconstant clauses; better to have the gating node above the
	 * subquery.
	 *
	 * Non-pushed-down clauses will get evaluated as qpquals of the
	 * SubqueryScan node.
	 *
	 * XXX Are there any cases where we want to make a policy decision not to
	 * push down a pushable qual, because it'd result in a worse plan?
	 */
	if (rel->baserestrictinfo != NIL &&
		subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
	{
		/* OK to consider pushing down individual quals */
		List	   *upperrestrictlist = NIL;
		ListCell   *l;

		foreach(l, rel->baserestrictinfo)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
			Node	   *clause = (Node *) rinfo->clause;

			/* pseudoconstant quals stay above the subquery, per above */
			if (rinfo->pseudoconstant)
			{
				upperrestrictlist = lappend(upperrestrictlist, rinfo);
				continue;
			}

			switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
			{
				case PUSHDOWN_SAFE:
					/* Push it down */
					subquery_push_qual(subquery, rte, rti, clause);
					break;

				case PUSHDOWN_WINDOWCLAUSE_RUNCOND:

					/*
					 * Since we can't push the qual down into the subquery,
					 * check if it happens to reference a window function.  If
					 * so then it might be useful to use for the WindowAgg's
					 * runCondition.
					 */
					if (!subquery->hasWindowFuncs ||
						check_and_push_window_quals(subquery, clause,
													&run_cond_attrs))
					{
						/*
						 * subquery has no window funcs or the clause is not a
						 * suitable window run condition qual or it is, but
						 * the original must also be kept in the upper query.
						 */
						upperrestrictlist = lappend(upperrestrictlist, rinfo);
					}
					break;

				case PUSHDOWN_UNSAFE:
					upperrestrictlist = lappend(upperrestrictlist, rinfo);
					break;
			}
		}
		rel->baserestrictinfo = upperrestrictlist;
		/* We don't bother recomputing baserestrict_min_security */
	}
	pfree(safetyInfo.unsafeFlags);

	/*
	 * The upper query might not use all the subquery's output columns; if
	 * not, we can simplify.  Pass the attributes that were pushed down into
	 * WindowAgg run conditions to ensure we don't accidentally think those
	 * are unused.
	 */
	remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);

	/*
	 * We can safely pass the outer tuple_fraction down to the subquery if the
	 * outer level has no joining, aggregation, or sorting to do. Otherwise
	 * we'd better tell the subquery to plan for full retrieval. (XXX This
	 * could probably be made more intelligent ...)
	 */
	if (parse->hasAggs ||
		parse->groupClause ||
		parse->groupingSets ||
		root->hasHavingQual ||
		parse->distinctClause ||
		parse->sortClause ||
		bms_membership(root->all_baserels) == BMS_MULTIPLE)
		tuple_fraction = 0.0;	/* default case */
	else
		tuple_fraction = root->tuple_fraction;

	/* plan_params should not be in use in current query level */
	Assert(root->plan_params == NIL);

	/* Generate a subroot and Paths for the subquery */
	/* derive a display name for the subplan from the RTE's alias */
	plan_name = choose_plan_name(root->glob, rte->eref->aliasname, false);
	rel->subroot = subquery_planner(root->glob, subquery, plan_name,
									root, false, tuple_fraction, NULL);

	/* Isolate the params needed by this specific subplan */
	rel->subplan_params = root->plan_params;
	root->plan_params = NIL;

	/*
	 * It's possible that constraint exclusion proved the subquery empty.  If
	 * so, it's desirable to produce an unadorned dummy path so that we will
	 * recognize appropriate optimizations at this query level.
	 */
	sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);

	if (IS_DUMMY_REL(sub_final_rel))
	{
		set_dummy_rel_pathlist(rel);
		return;
	}

	/*
	 * Mark rel with estimated output rows, width, etc.  Note that we have to
	 * do this before generating outer-query paths, else cost_subqueryscan is
	 * not happy.
	 */
	set_subquery_size_estimates(root, rel);

	/*
	 * Also detect whether the reltarget is trivial, so that we can pass that
	 * info to cost_subqueryscan (rather than re-deriving it multiple times).
	 * It's trivial if it fetches all the subplan output columns in order.
	 */
	if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
		trivial_pathtarget = false;
	else
	{
		trivial_pathtarget = true;
		foreach(lc, rel->reltarget->exprs)
		{
			Node	   *node = (Node *) lfirst(lc);
			Var		   *var;

			/* anything other than a plain Var disqualifies the target */
			if (!IsA(node, Var))
			{
				trivial_pathtarget = false;
				break;
			}
			var = (Var *) node;
			/* the Var must fetch column i+1 of this rel, in order */
			if (var->varno != rti ||
				var->varattno != foreach_current_index(lc) + 1)
			{
				trivial_pathtarget = false;
				break;
			}
		}
	}

	/*
	 * For each Path that subquery_planner produced, make a SubqueryScanPath
	 * in the outer query.
	 */
	foreach(lc, sub_final_rel->pathlist)
	{
		Path	   *subpath = (Path *) lfirst(lc);
		List	   *pathkeys;

		/* Convert subpath's pathkeys to outer representation */
		pathkeys = convert_subquery_pathkeys(root,
											 rel,
											 subpath->pathkeys,
											 make_tlist_from_pathtarget(subpath->pathtarget));

		/* Generate outer path using this subpath */
		add_path(rel, (Path *)
				 create_subqueryscan_path(root, rel, subpath,
										  trivial_pathtarget,
										  pathkeys, required_outer));
	}

	/* If outer rel allows parallelism, do same for partial paths. */
	if (rel->consider_parallel && bms_is_empty(required_outer))
	{
		/* If consider_parallel is false, there should be no partial paths. */
		Assert(sub_final_rel->consider_parallel ||
			   sub_final_rel->partial_pathlist == NIL);

		/* Same for partial paths. */
		foreach(lc, sub_final_rel->partial_pathlist)
		{
			Path	   *subpath = (Path *) lfirst(lc);
			List	   *pathkeys;

			/* Convert subpath's pathkeys to outer representation */
			pathkeys = convert_subquery_pathkeys(root,
												 rel,
												 subpath->pathkeys,
												 make_tlist_from_pathtarget(subpath->pathtarget));

			/* Generate outer path using this subpath */
			add_partial_path(rel, (Path *)
							 create_subqueryscan_path(root, rel, subpath,
													  trivial_pathtarget,
													  pathkeys,
													  required_outer));
		}
	}
}
/*
* set_function_pathlist
* Build the (single) access path for a function RTE
*/
static void
set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	List	   *pathkeys = NIL;
	Relids		required_outer = rel->lateral_relids;

	/*
	 * Join clauses can't be pushed into a function scan's quals, so the only
	 * parameterization comes from LATERAL refs in the function expression
	 * (captured above in required_outer).
	 *
	 * The output is unordered unless WITH ORDINALITY was specified, in which
	 * case it is ordered by the ordinal column -- the last one.  Check
	 * whether any equivalence class actually cares about that ordering.
	 */
	if (rte->funcordinality)
	{
		AttrNumber	ordattno = rel->max_attr;
		Var		   *ordvar = NULL;
		ListCell   *cell;

		/*
		 * Search the rel's targetlist for a Var referencing the ordinality
		 * column.  If none is found, the query never mentions it in a way
		 * that could matter for sorting.
		 */
		foreach(cell, rel->reltarget->exprs)
		{
			Var		   *candidate = (Var *) lfirst(cell);

			/* checking varno/varlevelsup is just paranoia */
			if (!IsA(candidate, Var) ||
				candidate->varattno != ordattno ||
				candidate->varno != rel->relid ||
				candidate->varlevelsup != 0)
				continue;

			ordvar = candidate;
			break;
		}

		/*
		 * Build int8-sort pathkeys for the Var, telling
		 * build_expression_pathkey not to create a new equivalence class:
		 * if no existing EC mentions the Var, nobody cares about the order.
		 */
		if (ordvar)
			pathkeys = build_expression_pathkey(root,
												(Expr *) ordvar,
												Int8LessOperator,
												rel->relids,
												false);
	}

	/* Generate the single path for the function scan */
	add_path(rel, create_functionscan_path(root, rel,
										   pathkeys, required_outer));
}
/*
* set_values_pathlist
* Build the (single) access path for a VALUES RTE
*/
static void
set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	/*
	 * Join clauses can't be pushed into a values scan's quals; the only
	 * required parameterization comes from LATERAL refs in the VALUES
	 * expressions, captured in rel->lateral_relids.
	 */
	add_path(rel, create_valuesscan_path(root, rel, rel->lateral_relids));
}
/*
* set_tablefunc_pathlist
* Build the (single) access path for a table func RTE
*/
static void
set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	/*
	 * Join clauses can't be pushed into a tablefunc scan's quals; the only
	 * required parameterization comes from LATERAL refs in the function
	 * expression, captured in rel->lateral_relids.
	 */
	add_path(rel, create_tablefuncscan_path(root, rel, rel->lateral_relids));
}
/*
* set_cte_pathlist
* Build the (single) access path for a non-self-reference CTE RTE
*
* There's no need for a separate set_cte_size phase, since we don't
* support join-qual-parameterized paths for CTEs.
*/
static void
set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	Path	   *ctepath;
	Plan	   *cteplan;
	PlannerInfo *cteroot;
	Index		levelsup;
	List	   *pathkeys;
	int			ndx;
	ListCell   *lc;
	int			plan_id;
	Relids		required_outer;

	/*
	 * Find the referenced CTE, and locate the path and plan previously made
	 * for it.
	 */
	levelsup = rte->ctelevelsup;
	cteroot = root;
	/* walk up ctelevelsup planner levels to the query that owns the CTE */
	while (levelsup-- > 0)
	{
		cteroot = cteroot->parent_root;
		if (!cteroot)			/* shouldn't happen */
			elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
	}

	/*
	 * Note: cte_plan_ids can be shorter than cteList, if we are still working
	 * on planning the CTEs (ie, this is a side-reference from another CTE).
	 * So we mustn't use forboth here.
	 */
	ndx = 0;
	/* locate the CTE's position within the owning query's cteList by name */
	foreach(lc, cteroot->parse->cteList)
	{
		CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);

		if (strcmp(cte->ctename, rte->ctename) == 0)
			break;
		ndx++;
	}
	if (lc == NULL)				/* shouldn't happen */
		elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
	if (ndx >= list_length(cteroot->cte_plan_ids))
		elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
	plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
	if (plan_id <= 0)
		elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);

	/* subpaths and subplans are parallel lists indexed by plan_id - 1 */
	Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
	ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
	cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);

	/* Mark rel with estimated output rows, width, etc */
	set_cte_size_estimates(root, rel, cteplan->plan_rows);

	/* Convert the ctepath's pathkeys to outer query's representation */
	pathkeys = convert_subquery_pathkeys(root,
										 rel,
										 ctepath->pathkeys,
										 cteplan->targetlist);

	/*
	 * We don't support pushing join clauses into the quals of a CTE scan, but
	 * it could still have required parameterization due to LATERAL refs in
	 * its tlist.
	 */
	required_outer = rel->lateral_relids;

	/* Generate appropriate path */
	add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
}
/*
* set_namedtuplestore_pathlist
* Build the (single) access path for a named tuplestore RTE
*
* There's no need for a separate set_namedtuplestore_size phase, since we
* don't support join-qual-parameterized paths for tuplestores.
*/
static void
set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
							 RangeTblEntry *rte)
{
	/* Fill in the rel's estimated output rows, width, etc. */
	set_namedtuplestore_size_estimates(root, rel);

	/*
	 * Join clauses can't be pushed into a tuplestore scan's quals; the only
	 * required parameterization comes from LATERAL refs in its tlist,
	 * captured in rel->lateral_relids.
	 */
	add_path(rel, create_namedtuplestorescan_path(root, rel,
												  rel->lateral_relids));
}
/*
* set_result_pathlist
* Build the (single) access path for an RTE_RESULT RTE
*
* There's no need for a separate set_result_size phase, since we
* don't support join-qual-parameterized paths for these RTEs.
*/
static void
set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
					RangeTblEntry *rte)
{
	/* Fill in the rel's estimated output rows, width, etc. */
	set_result_size_estimates(root, rel);

	/*
	 * Join clauses can't be pushed into a Result scan's quals; the only
	 * required parameterization comes from LATERAL refs in its tlist,
	 * captured in rel->lateral_relids.
	 */
	add_path(rel, create_resultscan_path(root, rel, rel->lateral_relids));
}
/*
* set_worktable_pathlist
* Build the (single) access path for a self-reference CTE RTE
*
* There's no need for a separate set_worktable_size phase, since we don't
* support join-qual-parameterized paths for CTEs.
*/
static void
set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
	Path	   *nrpath;
	PlannerInfo *curroot;
	Index		uplevels;

	/*
	 * The non-recursive term's path lives in the planner level that is
	 * processing the recursive UNION -- one level *below* where the CTE
	 * itself is defined -- so climb ctelevelsup - 1 levels.
	 */
	uplevels = rte->ctelevelsup;
	if (uplevels == 0)			/* shouldn't happen */
		elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);

	curroot = root;
	for (uplevels--; uplevels > 0; uplevels--)
	{
		curroot = curroot->parent_root;
		if (!curroot)			/* shouldn't happen */
			elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
	}

	nrpath = curroot->non_recursive_path;
	if (!nrpath)				/* shouldn't happen */
		elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);

	/* Fill in the rel's size estimates from the non-recursive term */
	set_cte_size_estimates(root, rel, nrpath->rows);

	/*
	 * Join clauses can't be pushed into a worktable scan's quals, but
	 * LATERAL refs in its tlist could still require parameterization.
	 * (Whether that's actually reachable given the restrictions on
	 * recursive references is unclear, but supporting it is easy.)
	 */
	add_path(rel, create_worktablescan_path(root, rel, rel->lateral_relids));
}
/*
* generate_gather_paths
* Generate parallel access paths for a relation by pushing a Gather or
* Gather Merge on top of a partial path.
*
* This must not be called until after we're done creating all partial paths
* for the specified relation. (Otherwise, add_partial_path might delete a
* path that some GatherPath or GatherMergePath has a reference to.)
*
* If we're generating paths for a scan or join relation, override_rows will
* be false, and we'll just use the relation's size estimate. When we're
* being called for a partially-grouped or partially-distinct path, though, we
* need to override the rowcount estimate. (It's not clear that the
* particular value we're using here is actually best, but the underlying rel
* has no estimate so we must do something.)
*/
void
generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
{
	Path	   *cheapest;
	double		rows;
	double	   *rowsp;
	ListCell   *cell;

	/* Nothing to do when the rel has no partial paths */
	if (rel->partial_pathlist == NIL)
		return;

	/* Pass a rowcount override to the path ctors only when requested */
	rowsp = override_rows ? &rows : NULL;

	/*
	 * Gather's output is always unsorted, so only the cheapest partial path
	 * is interesting -- and add_partial_path keeps that one at the front of
	 * partial_pathlist.
	 */
	cheapest = linitial(rel->partial_pathlist);
	rows = compute_gather_rows(cheapest);
	add_path(rel, (Path *)
			 create_gather_path(root, rel, cheapest, rel->reltarget,
								NULL, rowsp));

	/*
	 * Every partial path that carries pathkeys can additionally be wrapped
	 * in an order-preserving Gather Merge.
	 */
	foreach(cell, rel->partial_pathlist)
	{
		Path	   *partialpath = (Path *) lfirst(cell);
		GatherMergePath *gmpath;

		if (partialpath->pathkeys == NIL)
			continue;

		rows = compute_gather_rows(partialpath);
		gmpath = create_gather_merge_path(root, rel, partialpath,
										  rel->reltarget,
										  partialpath->pathkeys, NULL, rowsp);
		add_path(rel, &gmpath->path);
	}
}
/*
* get_useful_pathkeys_for_relation
* Determine which orderings of a relation might be useful.
*
* Getting data in sorted order can be useful either because the requested
* order matches the final output ordering for the overall query we're
* planning, or because it enables an efficient merge join. Here, we try
* to figure out which pathkeys to consider.
*
* This allows us to do incremental sort on top of an index scan under a gather
* merge node, i.e. parallelized.
*
* If the require_parallel_safe is true, we also require the expressions to
* be parallel safe (which allows pushing the sort below Gather Merge).
*
* XXX At the moment this can only ever return a list with a single element,
* because it looks at query_pathkeys only. So we might return the pathkeys
* directly, but it seems plausible we'll want to consider other orderings
* in the future. For example, we might want to consider pathkeys useful for
* merge joins.
*/
static List *
get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
								 bool require_parallel_safe)
{
	int			nuseful = 0;
	ListCell   *cell;

	/*
	 * Only query_pathkeys is examined here.  Matching it can let us avoid a
	 * final total sort, or push the sort into the parallel portion of the
	 * query, so it's always worth considering.
	 */
	if (root->query_pathkeys == NIL)
		return NIL;

	foreach(cell, root->query_pathkeys)
	{
		PathKey    *pk = (PathKey *) lfirst(cell);

		/*
		 * A sort is only buildable for pathkeys whose equivalence class has
		 * a safe-to-compute-early member derivable from this rel's reltarget
		 * (and, when requested, parallel-safe).  Stop at the first pathkey
		 * that fails; the prefix found so far is still worth returning,
		 * since it may enable an incremental sort.
		 */
		if (!relation_can_be_sorted_early(root, rel, pk->pk_eclass,
										  require_parallel_safe))
			break;
		nuseful++;
	}

	/*
	 * When the entire query_pathkeys list qualified, return it as-is so that
	 * callers can compare pathkey lists cheaply by pointer; a truncated
	 * prefix has to be copied instead.
	 */
	if (nuseful == list_length(root->query_pathkeys))
		return list_make1(root->query_pathkeys);
	if (nuseful > 0)
		return list_make1(list_copy_head(root->query_pathkeys, nuseful));
	return NIL;
}
/*
* generate_useful_gather_paths
* Generate parallel access paths for a relation by pushing a Gather or
* Gather Merge on top of a partial path.
*
* Unlike plain generate_gather_paths, this looks both at pathkeys of input
* paths (aiming to preserve the ordering), but also considers ordering that
* might be useful for nodes above the gather merge node, and tries to add
* a sort (regular or incremental) to provide that.
*/
void
generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
{
	ListCell   *lc;
	double		rows;
	double	   *rowsp = NULL;
	List	   *useful_pathkeys_list = NIL;
	Path	   *cheapest_partial_path = NULL;

	/* If there are no partial paths, there's nothing to do here. */
	if (rel->partial_pathlist == NIL)
		return;

	/* Should we override the rel's rowcount estimate? */
	if (override_rows)
		rowsp = &rows;

	/* generate the regular gather (merge) paths */
	generate_gather_paths(root, rel, override_rows);

	/* consider incremental sort for interesting orderings */
	useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);

	/* used for explicit (full) sort paths */
	cheapest_partial_path = linitial(rel->partial_pathlist);

	/*
	 * Consider sorted paths for each interesting ordering.  We generate both
	 * incremental and full sort.
	 */
	foreach(lc, useful_pathkeys_list)
	{
		List	   *useful_pathkeys = lfirst(lc);
		ListCell   *lc2;
		bool		is_sorted;
		int			presorted_keys;

		foreach(lc2, rel->partial_pathlist)
		{
			Path	   *subpath = (Path *) lfirst(lc2);
			GatherMergePath *path;

			/* also counts how many leading pathkeys are already satisfied */
			is_sorted = pathkeys_count_contained_in(useful_pathkeys,
													subpath->pathkeys,
													&presorted_keys);

			/*
			 * We don't need to consider the case where a subpath is already
			 * fully sorted because generate_gather_paths already creates a
			 * gather merge path for every subpath that has pathkeys present.
			 *
			 * But since the subpath is already sorted, we know we don't need
			 * to consider adding a sort (full or incremental) on top of it,
			 * so we can continue here.
			 */
			if (is_sorted)
				continue;

			/*
			 * Try at least sorting the cheapest path and also try
			 * incrementally sorting any path which is partially sorted
			 * already (no need to deal with paths which have presorted keys
			 * when incremental sort is disabled unless it's the cheapest
			 * input path).
			 */
			if (subpath != cheapest_partial_path &&
				(presorted_keys == 0 || !enable_incremental_sort))
				continue;

			/*
			 * Consider regular sort for any path that's not presorted or if
			 * incremental sort is disabled.  We've no need to consider both
			 * sort and incremental sort on the same path.  We assume that
			 * incremental sort is always faster when there are presorted
			 * keys.
			 *
			 * This is not redundant with the gather paths created in
			 * generate_gather_paths, because that doesn't generate ordered
			 * output.  Here we add an explicit sort to match the useful
			 * ordering.
			 */
			if (presorted_keys == 0 || !enable_incremental_sort)
			{
				subpath = (Path *) create_sort_path(root,
													rel,
													subpath,
													useful_pathkeys,
													-1.0);
			}
			else
				subpath = (Path *) create_incremental_sort_path(root,
																rel,
																subpath,
																useful_pathkeys,
																presorted_keys,
																-1);
			/* wrap the (incrementally) sorted path in a Gather Merge */
			rows = compute_gather_rows(subpath);
			path = create_gather_merge_path(root, rel,
											subpath,
											rel->reltarget,
											subpath->pathkeys,
											NULL,
											rowsp);

			add_path(rel, &path->path);
		}
	}
}
/*
* generate_grouped_paths
* Generate paths for a grouped relation by adding sorted and hashed
* partial aggregation paths on top of paths of the ungrouped relation.
*
* The information needed is provided by the RelAggInfo structure stored in
* "grouped_rel".
*/
void
generate_grouped_paths(PlannerInfo *root, RelOptInfo *grouped_rel,
					   RelOptInfo *rel)
{
	RelAggInfo *agg_info = grouped_rel->agg_info;	/* partial-agg descriptor */
	AggClauseCosts agg_costs;	/* costs of the partial aggregate phase */
	bool		can_hash;		/* is hash-based grouping possible? */
	bool		can_sort;		/* is sort-based grouping possible? */
	Path	   *cheapest_total_path = NULL; /* cheapest non-partial input */
	Path	   *cheapest_partial_path = NULL;	/* cheapest partial input */
	double		dNumGroups = 0; /* group estimate for non-partial inputs */
	double		dNumPartialGroups = 0;	/* group estimate for partial inputs */
	List	   *group_pathkeys = NIL;	/* sort order required by grouping */

	/* An empty input relation makes the grouped relation empty too. */
	if (IS_DUMMY_REL(rel))
	{
		mark_dummy_rel(grouped_rel);
		return;
	}

	/*
	 * We push partial aggregation only to the lowest possible level in the
	 * join tree that is deemed useful.
	 */
	if (!bms_equal(agg_info->apply_agg_at, rel->relids) ||
		!agg_info->agg_useful)
		return;

	MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
	get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL, &agg_costs);

	/*
	 * Determine whether it's possible to perform sort-based implementations
	 * of grouping, and generate the pathkeys that represent the grouping
	 * requirements in that case.
	 */
	can_sort = grouping_is_sortable(agg_info->group_clauses);
	if (can_sort)
	{
		RelOptInfo *top_grouped_rel;
		List	   *top_group_tlist;

		/*
		 * For an "other" (child) rel, the tlist used to build the pathkeys
		 * comes from the top parent's grouped rel; otherwise from our own.
		 */
		top_grouped_rel = IS_OTHER_REL(rel) ?
			rel->top_parent->grouped_rel : grouped_rel;
		top_group_tlist =
			make_tlist_from_pathtarget(top_grouped_rel->agg_info->target);

		group_pathkeys =
			make_pathkeys_for_sortclauses(root, agg_info->group_clauses,
										  top_group_tlist);
	}

	/*
	 * Determine whether we should consider hash-based implementations of
	 * grouping.
	 */
	Assert(root->numOrderedAggs == 0);
	can_hash = (agg_info->group_clauses != NIL &&
				grouping_is_hashable(agg_info->group_clauses));

	/*
	 * Consider whether we should generate partially aggregated non-partial
	 * paths.  We can only do this if we have a non-partial path.
	 */
	if (rel->pathlist != NIL)
	{
		cheapest_total_path = rel->cheapest_total_path;
		Assert(cheapest_total_path != NULL);
	}

	/*
	 * If parallelism is possible for grouped_rel, then we should consider
	 * generating partially-grouped partial paths.  However, if the ungrouped
	 * rel has no partial paths, then we can't.
	 */
	if (grouped_rel->consider_parallel && rel->partial_pathlist != NIL)
	{
		cheapest_partial_path = linitial(rel->partial_pathlist);
		Assert(cheapest_partial_path != NULL);
	}

	/* Estimate number of partial groups. */
	if (cheapest_total_path != NULL)
		dNumGroups = estimate_num_groups(root,
										 agg_info->group_exprs,
										 cheapest_total_path->rows,
										 NULL, NULL);
	if (cheapest_partial_path != NULL)
		dNumPartialGroups = estimate_num_groups(root,
												agg_info->group_exprs,
												cheapest_partial_path->rows,
												NULL, NULL);

	if (can_sort && cheapest_total_path != NULL)
	{
		ListCell   *lc;

		/*
		 * Use any available suitably-sorted path as input, and also consider
		 * sorting the cheapest-total path and incremental sort on any paths
		 * with presorted keys.
		 *
		 * To save planning time, we ignore parameterized input paths unless
		 * they are the cheapest-total path.
		 */
		foreach(lc, rel->pathlist)
		{
			Path	   *input_path = (Path *) lfirst(lc);
			Path	   *path;
			bool		is_sorted;
			int			presorted_keys;

			/*
			 * Ignore parameterized paths that are not the cheapest-total
			 * path.
			 */
			if (input_path->param_info &&
				input_path != cheapest_total_path)
				continue;

			is_sorted = pathkeys_count_contained_in(group_pathkeys,
													input_path->pathkeys,
													&presorted_keys);

			/*
			 * Ignore paths that are not suitably or partially sorted, unless
			 * they are the cheapest total path (no need to deal with paths
			 * which have presorted keys when incremental sort is disabled).
			 */
			if (!is_sorted && input_path != cheapest_total_path &&
				(presorted_keys == 0 || !enable_incremental_sort))
				continue;

			/*
			 * Since the path originates from a non-grouped relation that is
			 * not aware of eager aggregation, we must ensure that it provides
			 * the correct input for partial aggregation.
			 */
			path = (Path *) create_projection_path(root,
												   grouped_rel,
												   input_path,
												   agg_info->agg_input);

			if (!is_sorted)
			{
				/*
				 * We've no need to consider both a sort and incremental sort.
				 * We'll just do a sort if there are no presorted keys and an
				 * incremental sort when there are presorted keys.
				 */
				if (presorted_keys == 0 || !enable_incremental_sort)
					path = (Path *) create_sort_path(root,
													 grouped_rel,
													 path,
													 group_pathkeys,
													 -1.0);
				else
					path = (Path *) create_incremental_sort_path(root,
																 grouped_rel,
																 path,
																 group_pathkeys,
																 presorted_keys,
																 -1.0);
			}

			/*
			 * qual is NIL because the HAVING clause cannot be evaluated until
			 * the final value of the aggregate is known.
			 */
			path = (Path *) create_agg_path(root,
											grouped_rel,
											path,
											agg_info->target,
											AGG_SORTED,
											AGGSPLIT_INITIAL_SERIAL,
											agg_info->group_clauses,
											NIL,
											&agg_costs,
											dNumGroups);

			add_path(grouped_rel, path);
		}
	}

	if (can_sort && cheapest_partial_path != NULL)
	{
		ListCell   *lc;

		/* Similar to above logic, but for partial paths. */
		foreach(lc, rel->partial_pathlist)
		{
			Path	   *input_path = (Path *) lfirst(lc);
			Path	   *path;
			bool		is_sorted;
			int			presorted_keys;

			is_sorted = pathkeys_count_contained_in(group_pathkeys,
													input_path->pathkeys,
													&presorted_keys);

			/*
			 * Ignore paths that are not suitably or partially sorted, unless
			 * they are the cheapest partial path (no need to deal with paths
			 * which have presorted keys when incremental sort is disabled).
			 */
			if (!is_sorted && input_path != cheapest_partial_path &&
				(presorted_keys == 0 || !enable_incremental_sort))
				continue;

			/*
			 * Since the path originates from a non-grouped relation that is
			 * not aware of eager aggregation, we must ensure that it provides
			 * the correct input for partial aggregation.
			 */
			path = (Path *) create_projection_path(root,
												   grouped_rel,
												   input_path,
												   agg_info->agg_input);

			if (!is_sorted)
			{
				/*
				 * We've no need to consider both a sort and incremental sort.
				 * We'll just do a sort if there are no presorted keys and an
				 * incremental sort when there are presorted keys.
				 */
				if (presorted_keys == 0 || !enable_incremental_sort)
					path = (Path *) create_sort_path(root,
													 grouped_rel,
													 path,
													 group_pathkeys,
													 -1.0);
				else
					path = (Path *) create_incremental_sort_path(root,
																 grouped_rel,
																 path,
																 group_pathkeys,
																 presorted_keys,
																 -1.0);
			}

			/*
			 * qual is NIL because the HAVING clause cannot be evaluated until
			 * the final value of the aggregate is known.
			 */
			path = (Path *) create_agg_path(root,
											grouped_rel,
											path,
											agg_info->target,
											AGG_SORTED,
											AGGSPLIT_INITIAL_SERIAL,
											agg_info->group_clauses,
											NIL,
											&agg_costs,
											dNumPartialGroups);

			add_partial_path(grouped_rel, path);
		}
	}

	/*
	 * Add a partially-grouped HashAgg Path where possible
	 */
	if (can_hash && cheapest_total_path != NULL)
	{
		Path	   *path;

		/*
		 * Since the path originates from a non-grouped relation that is not
		 * aware of eager aggregation, we must ensure that it provides the
		 * correct input for partial aggregation.
		 */
		path = (Path *) create_projection_path(root,
											   grouped_rel,
											   cheapest_total_path,
											   agg_info->agg_input);

		/*
		 * qual is NIL because the HAVING clause cannot be evaluated until the
		 * final value of the aggregate is known.
		 */
		path = (Path *) create_agg_path(root,
										grouped_rel,
										path,
										agg_info->target,
										AGG_HASHED,
										AGGSPLIT_INITIAL_SERIAL,
										agg_info->group_clauses,
										NIL,
										&agg_costs,
										dNumGroups);

		add_path(grouped_rel, path);
	}

	/*
	 * Now add a partially-grouped HashAgg partial Path where possible
	 */
	if (can_hash && cheapest_partial_path != NULL)
	{
		Path	   *path;

		/*
		 * Since the path originates from a non-grouped relation that is not
		 * aware of eager aggregation, we must ensure that it provides the
		 * correct input for partial aggregation.
		 */
		path = (Path *) create_projection_path(root,
											   grouped_rel,
											   cheapest_partial_path,
											   agg_info->agg_input);

		/*
		 * qual is NIL because the HAVING clause cannot be evaluated until the
		 * final value of the aggregate is known.
		 */
		path = (Path *) create_agg_path(root,
										grouped_rel,
										path,
										agg_info->target,
										AGG_HASHED,
										AGGSPLIT_INITIAL_SERIAL,
										agg_info->group_clauses,
										NIL,
										&agg_costs,
										dNumPartialGroups);

		add_partial_path(grouped_rel, path);
	}
}
/*
* make_rel_from_joinlist
* Build access paths using a "joinlist" to guide the join path search.
*
* See comments for deconstruct_jointree() for definition of the joinlist
* data structure.
*/
static RelOptInfo *
make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
{
	int			nitems;
	List	   *item_rels = NIL;
	ListCell   *cell;

	/*
	 * The number of child joinlist nodes is the depth of the
	 * dynamic-programming search we'd need to join them all.
	 */
	nitems = list_length(joinlist);
	if (nitems <= 0)
		return NULL;			/* nothing to do? */

	/*
	 * Translate each child joinlist node into a RelOptInfo: a baserel for a
	 * RangeTblRef, or a recursively-constructed joinrel for a sub-joinlist.
	 */
	foreach(cell, joinlist)
	{
		Node	   *node = (Node *) lfirst(cell);
		RelOptInfo *itemrel;

		if (IsA(node, RangeTblRef))
			itemrel = find_base_rel(root, ((RangeTblRef *) node)->rtindex);
		else if (IsA(node, List))
		{
			/* Recurse to handle subproblem */
			itemrel = make_rel_from_joinlist(root, (List *) node);
		}
		else
		{
			elog(ERROR, "unrecognized joinlist node type: %d",
				 (int) nodeTag(node));
			itemrel = NULL;		/* keep compiler quiet */
		}

		item_rels = lappend(item_rels, itemrel);
	}

	/* With a single item there is no join-order problem to solve. */
	if (nitems == 1)
		return (RelOptInfo *) linitial(item_rels);

	/*
	 * Otherwise pick a join-order search strategy: a plugin hook, GEQO, or
	 * the regular dynamic-programming join search.
	 *
	 * The initial_rels list goes into a PlannerInfo field because
	 * has_legal_joinclause() needs to look at it (ugly :-().
	 */
	root->initial_rels = item_rels;

	if (join_search_hook)
		return (*join_search_hook) (root, nitems, item_rels);
	else if (enable_geqo && nitems >= geqo_threshold)
		return geqo(root, nitems, item_rels);
	else
		return standard_join_search(root, nitems, item_rels);
}
/*
* standard_join_search
* Find possible joinpaths for a query by successively finding ways
* to join component relations into join relations.
*
* 'levels_needed' is the number of iterations needed, ie, the number of
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
* jointree item. These are the components to be joined together.
* Note that levels_needed == list_length(initial_rels).
*
* Returns the final level of join relations, i.e., the relation that is
* the result of joining all the original relations together.
* At least one implementation path must be provided for this relation and
* all required sub-relations.
*
* To support loadable plugins that modify planner behavior by changing the
* join searching algorithm, we provide a hook variable that lets a plugin
* replace or supplement this function. Any such hook must return the same
* final join relation as the standard code would, but it might have a
* different set of implementation paths attached, and only the sub-joinrels
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
* modify root->join_rel_list and root->join_rel_hash. If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
RelOptInfo *
standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
{
	RelOptInfo *final_rel;
	int			level;

	/*
	 * join_rel_level[] must not already be in use: this function is never
	 * invoked recursively within any one planning problem.
	 */
	Assert(root->join_rel_level == NULL);

	/*
	 * Classic dynamic programming: level 1 holds the single-jointree-item
	 * rels; each higher level k holds every k-item joinrel buildable from
	 * the lower levels, until the top level joins all the items into one
	 * rel.  root->join_rel_level[k] is the list of all k-item rels.
	 */
	root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
	root->join_rel_level[1] = initial_rels;

	for (level = 2; level <= levels_needed; level++)
	{
		ListCell   *cell;

		/*
		 * Build all possible joinrels of this size from every available
		 * pair of lower-level relations.
		 */
		join_search_one_level(root, level);

		/*
		 * Post-process each joinrel created at this level.  This must wait
		 * until the whole level is done, because both regular and partial
		 * paths can be added to a given joinrel multiple times within
		 * join_search_one_level.  Once no more paths will arrive, we create
		 * partitionwise-join and gather paths, pick the cheapest paths, and
		 * do the same for the rel's grouped counterpart (eager aggregation).
		 */
		foreach(cell, root->join_rel_level[level])
		{
			RelOptInfo *joinrel = (RelOptInfo *) lfirst(cell);
			bool		is_top_rel = bms_equal(joinrel->relids,
											   root->all_query_rels);

			/* Create paths for partitionwise joins. */
			generate_partitionwise_join_paths(root, joinrel);

			/*
			 * Except for the topmost scan/join rel, consider gathering
			 * partial paths.  The topmost rel is handled later, once the
			 * final targetlist is known (see grouping_planner's call to
			 * apply_scanjoin_target_to_paths).
			 */
			if (!is_top_rel)
				generate_useful_gather_paths(root, joinrel, false);

			/* Find and save the cheapest paths for this rel */
			set_cheapest(joinrel);

			/*
			 * Likewise, except for the topmost scan/join rel, generate
			 * partial aggregation paths for the grouped relation on top of
			 * this rel's paths, then finalize it with set_cheapest().
			 */
			if (joinrel->grouped_rel != NULL && !is_top_rel)
			{
				RelOptInfo *grouped_rel = joinrel->grouped_rel;

				Assert(IS_GROUPED_REL(grouped_rel));

				generate_grouped_paths(root, grouped_rel, joinrel);
				set_cheapest(grouped_rel);
			}

#ifdef OPTIMIZER_DEBUG
			pprint(joinrel);
#endif
		}
	}

	/*
	 * The final level must contain exactly one rel: the join of everything.
	 */
	if (root->join_rel_level[levels_needed] == NIL)
		elog(ERROR, "failed to build any %d-way joins", levels_needed);
	Assert(list_length(root->join_rel_level[levels_needed]) == 1);

	final_rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);

	root->join_rel_level = NULL;

	return final_rel;
}
/*****************************************************************************
* PUSHING QUALS DOWN INTO SUBQUERIES
*****************************************************************************/
/*
* subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
*
* subquery is the particular component query being checked. topquery
* is the top component of a set-operations tree (the same Query if no
* set-op is involved).
*
* Conditions checked here:
*
* 1. If the subquery has a LIMIT clause, we must not push down any quals,
* since that could change the set of rows returned.
*
* 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
* quals into it, because that could change the results.
*
* 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
* This is because upper-level quals should semantically be evaluated only
* once per distinct row, not once per original row, and if the qual is
* volatile then extra evaluations could change the results. (This issue
* does not apply to other forms of aggregation such as GROUP BY, because
* when those are present we push into HAVING not WHERE, so that the quals
* are still applied after aggregation.)
*
* 4. If the subquery contains window functions, we cannot push volatile quals
* into it. The issue here is a bit different from DISTINCT: a volatile qual
* might succeed for some rows of a window partition and fail for others,
* thereby changing the partition contents and thus the window functions'
* results for rows that remain.
*
* 5. If the subquery contains any set-returning functions in its targetlist,
* we cannot push volatile quals into it. That would push them below the SRFs
* and thereby change the number of times they are evaluated. Also, a
* volatile qual could succeed for some SRF output rows and fail for others,
* a behavior that cannot occur if it's evaluated before SRF expansion.
*
* 6. If the subquery has nonempty grouping sets, we cannot push down any
* quals. The concern here is that a qual referencing a "constant" grouping
* column could get constant-folded, which would be improper because the value
* is potentially nullable by grouping-set expansion. This restriction could
* be removed if we had a parsetree representation that shows that such
* grouping columns are not really constant. (There are other ideas that
* could be used to relax this restriction, but that's the approach most
* likely to get taken in the future. Note that there's not much to be gained
* so long as subquery_planner can't move HAVING clauses to WHERE within such
* a subquery.)
*
* In addition, we make several checks on the subquery's output columns to see
* if it is safe to reference them in pushed-down quals. If output column k
* is found to be unsafe to reference, we set the reason for that inside
* safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
* column k might not be referenced by some/all quals. The unsafeFlags[]
* array will be consulted later by qual_is_pushdown_safe(). It's better to
* do it this way than to make the checks directly in qual_is_pushdown_safe(),
* because when the subquery involves set operations we have to check the
* output expressions in each arm of the set op.
*
* Note: pushing quals into a DISTINCT subquery is theoretically dubious:
* we're effectively assuming that the quals cannot distinguish values that
* the DISTINCT's equality operator sees as equal, yet there are many
* counterexamples to that assumption. However use of such a qual with a
* DISTINCT subquery would be unsafe anyway, since there's no guarantee which
* "equal" value will be chosen as the output value by the DISTINCT operation.
* So we don't worry too much about that. Another objection is that if the
* qual is expensive to evaluate, running it for each original row might cost
* more than we save by eliminating rows before the DISTINCT step. But it
* would be very hard to estimate that at this stage, and in practice pushdown
* seldom seems to make things worse, so we ignore that problem too.
*
* Note: likewise, pushing quals into a subquery with window functions is a
* bit dubious: the quals might remove some rows of a window partition while
* leaving others, causing changes in the window functions' results for the
* surviving rows. We insist that such a qual reference only partitioning
* columns, but again that only protects us if the qual does not distinguish
* values that the partitioning equality operator sees as equal. The risks
* here are perhaps larger than for DISTINCT, since no de-duplication of rows
* occurs and thus there is no theoretical problem with such a qual. But
* we'll do this anyway because the potential performance benefits are very
* large, and we've seen no field complaints about the longstanding comparable
* behavior with DISTINCT.
*/
static bool
subquery_is_pushdown_safe(Query *subquery, Query *topquery,
						  pushdown_safety_info *safetyInfo)
{
	/* Check point 1: a LIMIT/OFFSET clause rules out all pushdown */
	if (subquery->limitOffset || subquery->limitCount)
		return false;

	/* Check point 6: nonempty grouping sets rule out all pushdown */
	if (subquery->groupClause && subquery->groupingSets)
		return false;

	/*
	 * Check points 3, 4, and 5: DISTINCT, window functions, and targetlist
	 * SRFs each make volatile quals unsafe to push down.
	 */
	if (subquery->distinctClause ||
		subquery->hasWindowFuncs ||
		subquery->hasTargetSRFs)
		safetyInfo->unsafeVolatile = true;

	/*
	 * For a leaf query, vet its output expressions and record in
	 * unsafeFlags[] any reasons they are unsafe to reference.  (Non-leaf
	 * nodes in setop trees have only simple Vars in their tlists, so no
	 * need to check them.)
	 */
	if (subquery->setOperations == NULL)
		check_output_expressions(subquery, safetyInfo);

	if (subquery == topquery)
	{
		/* Top level: recursively vet any setop component queries */
		if (subquery->setOperations != NULL &&
			!recurse_pushdown_safe(subquery->setOperations, topquery,
								   safetyInfo))
			return false;
	}
	else
	{
		SetOperationStmt *topop;

		/* A setop component with its own setops is too weird; give up */
		if (subquery->setOperations != NULL)
			return false;

		/* Verify this component's output types against the top level */
		topop = castNode(SetOperationStmt, topquery->setOperations);
		Assert(topop);
		compare_tlist_datatypes(subquery->targetList,
								topop->colTypes,
								safetyInfo);
	}

	return true;
}
/*
* Helper routine to recurse through setOperations tree
*/
static bool
recurse_pushdown_safe(Node *setOp, Query *topquery,
					  pushdown_safety_info *safetyInfo)
{
	if (IsA(setOp, SetOperationStmt))
	{
		SetOperationStmt *op = (SetOperationStmt *) setOp;

		/* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
		if (op->op == SETOP_EXCEPT)
			return false;
		/* Both arms must be safe (short-circuits on the first failure) */
		return recurse_pushdown_safe(op->larg, topquery, safetyInfo) &&
			recurse_pushdown_safe(op->rarg, topquery, safetyInfo);
	}
	else if (IsA(setOp, RangeTblRef))
	{
		/* A leaf: look up the component subquery and vet it directly */
		RangeTblRef *rtr = (RangeTblRef *) setOp;
		RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);

		Assert(rte->subquery != NULL);
		return subquery_is_pushdown_safe(rte->subquery, topquery, safetyInfo);
	}

	elog(ERROR, "unrecognized node type: %d",
		 (int) nodeTag(setOp));
	return true;				/* keep compiler quiet */
}
/*
* check_output_expressions - check subquery's output expressions for safety
*
* There are several cases in which it's unsafe to push down an upper-level
* qual if it references a particular output column of a subquery. We check
* each output column of the subquery and set flags in unsafeFlags[k] when we
* see that column is unsafe for a pushed-down qual to reference. The
* conditions checked here are:
*
* 1. We must not push down any quals that refer to subselect outputs that
* return sets, else we'd introduce functions-returning-sets into the
* subquery's WHERE/HAVING quals.
*
* 2. We must not push down any quals that refer to subselect outputs that
* contain volatile functions, for fear of introducing strange results due
* to multiple evaluation of a volatile function.
*
* 3. If the subquery uses DISTINCT ON, we must not push down any quals that
* refer to non-DISTINCT output columns, because that could change the set
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. Note that
* subquery_is_pushdown_safe already reported that we can't use volatile
* quals if there's DISTINCT or DISTINCT ON.)
*
* 4. If the subquery has any window functions, we must not push down quals
* that reference any output columns that are not listed in all the subquery's
* window PARTITION BY clauses. We can push down quals that use only
* partitioning columns because they should succeed or fail identically for
* every row of any one window partition, and totally excluding some
* partitions will not change a window function's results for remaining
* partitions. (Again, this also requires nonvolatile quals, but
* subquery_is_pushdown_safe handles that.). Subquery columns marked as
* unsafe for this reason can still have WindowClause run conditions pushed
* down.
*/
static void
check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
{
	List	   *flattened_targetList = subquery->targetList;
	ListCell   *lc;

	/*
	 * We must be careful with grouping Vars and join alias Vars in the
	 * subquery's outputs, as they hide the underlying expressions.
	 *
	 * We need to expand grouping Vars to their underlying expressions (the
	 * grouping clauses) because the grouping expressions themselves might be
	 * volatile or set-returning.  However, we do not need to expand join
	 * alias Vars, as their underlying structure does not introduce volatile
	 * or set-returning functions at the current level.
	 *
	 * In neither case do we need to recursively examine the Vars contained in
	 * these underlying expressions.  Even if they reference outputs from
	 * lower-level subqueries (at any depth), those references are guaranteed
	 * not to expand to volatile or set-returning functions, because
	 * subqueries containing such functions in their targetlists are never
	 * pulled up.
	 */
	if (subquery->hasGroupRTE)
	{
		flattened_targetList = (List *)
			flatten_group_exprs(NULL, subquery, (Node *) subquery->targetList);
	}

	foreach(lc, flattened_targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);

		if (tle->resjunk)
			continue;			/* ignore resjunk columns */

		/* Functions returning sets are unsafe (point 1) */
		if (subquery->hasTargetSRFs &&
			(safetyInfo->unsafeFlags[tle->resno] &
			 UNSAFE_HAS_SET_FUNC) == 0 &&
			expression_returns_set((Node *) tle->expr))
		{
			safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
			continue;
		}

		/* Volatile functions are unsafe (point 2) */
		if ((safetyInfo->unsafeFlags[tle->resno] &
			 UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
			contain_volatile_functions((Node *) tle->expr))
		{
			safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
			continue;
		}

		/* If subquery uses DISTINCT ON, check point 3 */
		if (subquery->hasDistinctOn &&
			(safetyInfo->unsafeFlags[tle->resno] &
			 UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
			!targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
		{
			/* non-DISTINCT column, so mark it unsafe */
			safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
			continue;
		}

		/*
		 * If subquery uses window functions, check point 4.
		 *
		 * Fixed: the guard previously tested UNSAFE_NOTIN_DISTINCTON_CLAUSE,
		 * which is an unrelated bit; it must test the same bit this branch
		 * sets, so that a column already marked unsafe for PARTITION BY
		 * reasons skips the (potentially expensive) partition-list scan.
		 */
		if (subquery->hasWindowFuncs &&
			(safetyInfo->unsafeFlags[tle->resno] &
			 UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
			!targetIsInAllPartitionLists(tle, subquery))
		{
			/* not present in all PARTITION BY clauses, so mark it unsafe */
			safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
			continue;
		}
	}
}
/*
* For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
* push quals into each component query, but the quals can only reference
* subquery columns that suffer no type coercions in the set operation.
* Otherwise there are possible semantic gotchas. So, we check the
* component queries to see if any of them have output types different from
* the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
* unsafeFlags[k] if column k has different type in any component.
*
* We don't have to care about typmods here: the only allowed difference
* between set-op input and output typmods is input is a specific typmod
* and output is -1, and that does not require a coercion.
*
* tlist is a subquery tlist.
* colTypes is an OID list of the top-level setop's output column types.
* safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
*/
static void
compare_tlist_datatypes(List *tlist, List *colTypes,
						pushdown_safety_info *safetyInfo)
{
	ListCell   *curColType = list_head(colTypes);
	ListCell   *tl;

	foreach(tl, tlist)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(tl);

		/* resjunk columns don't correspond to setop output columns */
		if (tle->resjunk)
			continue;

		if (curColType == NULL)
			elog(ERROR, "wrong number of tlist entries");

		/* A type difference implies a coercion: mark the column unsafe */
		if (exprType((Node *) tle->expr) != lfirst_oid(curColType))
			safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;

		curColType = lnext(colTypes, curColType);
	}

	if (curColType != NULL)
		elog(ERROR, "wrong number of tlist entries");
}
/*
* targetIsInAllPartitionLists
* True if the TargetEntry is listed in the PARTITION BY clause
* of every window defined in the query.
*
* It would be safe to ignore windows not actually used by any window
* function, but it's not easy to get that info at this stage; and it's
* unlikely to be useful to spend any extra cycles getting it, since
* unreferenced window definitions are probably infrequent in practice.
*/
static bool
targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
{
	ListCell   *wl;

	/* Fail as soon as one window's PARTITION BY omits this column */
	foreach(wl, query->windowClause)
	{
		WindowClause *wclause = (WindowClause *) lfirst(wl);

		if (!targetIsInSortList(tle, InvalidOid, wclause->partitionClause))
			return false;
	}

	return true;
}
/*
* qual_is_pushdown_safe - is a particular rinfo safe to push down?
*
* rinfo is a restriction clause applying to the given subquery (whose RTE
* has index rti in the parent query).
*
* Conditions checked here:
*
* 1. rinfo's clause must not contain any SubPlans (mainly because it's
* unclear that it will work correctly: SubLinks will already have been
* transformed into SubPlans in the qual, but not in the subquery). Note that
* SubLinks that transform to initplans are safe, and will be accepted here
* because what we'll see in the qual is just a Param referencing the initplan
* output.
*
* 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
* functions.
*
* 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
* functions that are passed Var nodes, and therefore might reveal values from
* the subquery as side effects.
*
* 4. rinfo's clause must not refer to the whole-row output of the subquery
* (since there is no easy way to name that within the subquery itself).
*
* 5. rinfo's clause must not refer to any subquery output columns that were
* found to be unsafe to reference by subquery_is_pushdown_safe().
*/
static pushdown_safe_type
qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
					  pushdown_safety_info *safetyInfo)
{
	Node	   *qual = (Node *) rinfo->clause;
	pushdown_safe_type result = PUSHDOWN_SAFE;
	List	   *clause_vars;
	ListCell   *lc;

	/* Refuse subselects (point 1) */
	if (contain_subplans(qual))
		return PUSHDOWN_UNSAFE;

	/* Refuse volatile quals if we found they'd be unsafe (point 2) */
	if (safetyInfo->unsafeVolatile &&
		contain_volatile_functions((Node *) rinfo))
		return PUSHDOWN_UNSAFE;

	/* Refuse leaky quals if told to (point 3) */
	if (safetyInfo->unsafeLeaky &&
		contain_leaked_vars(qual))
		return PUSHDOWN_UNSAFE;

	/*
	 * Examine all Vars used in clause.  Since it's a restriction clause, all
	 * such Vars must refer to subselect output columns ... unless this is
	 * part of a LATERAL subquery, in which case there could be lateral
	 * references.
	 *
	 * By omitting the relevant flags, this also gives us a cheap sanity
	 * check that no aggregates or window functions appear in the qual.
	 * Those would be unsafe to push down, but at least for the moment we
	 * could never see any in a qual anyhow.
	 */
	clause_vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
	foreach(lc, clause_vars)
	{
		Var		   *var = (Var *) lfirst(lc);

		if (!IsA(var, Var))
		{
			/*
			 * XXX Punt on PlaceHolderVars: it's not clear whether a PHV
			 * could safely be pushed down, and even less clear whether such
			 * a situation could arise in any cases of practical interest.
			 */
			result = PUSHDOWN_UNSAFE;
			break;
		}

		if (var->varno != rti)
		{
			/*
			 * Punt on lateral references.  Pushing them down would be safe
			 * but requires converting them into outer references, which
			 * subquery_push_qual lacks the infrastructure for; the case is
			 * too rare to be worth the work.
			 */
			result = PUSHDOWN_UNSAFE;
			break;
		}

		/* Subqueries have no system columns */
		Assert(var->varattno >= 0);

		if (var->varattno == 0)
		{
			/* whole-row reference: no way to name that inside (point 4) */
			result = PUSHDOWN_UNSAFE;
			break;
		}

		/* Check point 5: flags recorded by subquery_is_pushdown_safe() */
		if (safetyInfo->unsafeFlags[var->varattno] != 0)
		{
			if (safetyInfo->unsafeFlags[var->varattno] &
				(UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
				 UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
			{
				result = PUSHDOWN_UNSAFE;
				break;
			}

			/* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
			result = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
			/* keep scanning: a later Var may still prove unsafe */
		}
	}
	list_free(clause_vars);

	return result;
}
/*
* subquery_push_qual - push down a qual that we have determined is safe
*/
static void
subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
{
	if (subquery->setOperations != NULL)
	{
		/* Recurse to push it separately to each component query */
		recurse_push_qual(subquery->setOperations, subquery,
						  rte, rti, qual);
		return;
	}

	/*
	 * Replace Vars in the qual (which must refer to outputs of the
	 * subquery) with copies of the subquery's targetlist expressions.  Any
	 * uplevel Vars have already been turned into Params, so they need no
	 * work.  This step also ensures that, when pushing into a setop tree,
	 * each component query gets its own copy of the qual.
	 */
	qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
									 subquery->targetList,
									 subquery->resultRelation,
									 REPLACEVARS_REPORT_ERROR, 0,
									 &subquery->hasSubLinks);

	/*
	 * Attach the qual where it belongs: HAVING if the subquery does
	 * grouping or aggregation (the qual really refers to group-result
	 * rows), otherwise WHERE.
	 */
	if (subquery->hasAggs ||
		subquery->groupClause ||
		subquery->groupingSets ||
		subquery->havingQual)
		subquery->havingQual = make_and_qual(subquery->havingQual, qual);
	else
		subquery->jointree->quals =
			make_and_qual(subquery->jointree->quals, qual);

	/*
	 * No need to update hasAggs or hasSubLinks: we never push down
	 * aggregates that weren't already there, and we don't push down
	 * subselects at all.
	 */
}
/*
 * recurse_push_qual
 *		Descend a setOperations tree, pushing the given qual into every
 *		leaf component query via subquery_push_qual().
 */
static void
recurse_push_qual(Node *setOp, Query *topquery,
				  RangeTblEntry *rte, Index rti, Node *qual)
{
	if (IsA(setOp, SetOperationStmt))
	{
		/* Internal node: recurse into both arms of the set operation. */
		SetOperationStmt *sop = (SetOperationStmt *) setOp;

		recurse_push_qual(sop->larg, topquery, rte, rti, qual);
		recurse_push_qual(sop->rarg, topquery, rte, rti, qual);
	}
	else if (IsA(setOp, RangeTblRef))
	{
		/* Leaf node: locate the component subquery and push the qual. */
		RangeTblRef *leafref = (RangeTblRef *) setOp;
		RangeTblEntry *leafrte = rt_fetch(leafref->rtindex, topquery->rtable);
		Query	   *leafquery = leafrte->subquery;

		Assert(leafquery != NULL);
		subquery_push_qual(leafquery, rte, rti, qual);
	}
	else
	{
		elog(ERROR, "unrecognized node type: %d",
			 (int) nodeTag(setOp));
	}
}
/*****************************************************************************
* SIMPLIFYING SUBQUERY TARGETLISTS
*****************************************************************************/
/*
 * remove_unused_subquery_outputs
 *		Remove subquery targetlist items we don't need
 *
 * It's possible, even likely, that the upper query does not read all the
 * output columns of the subquery.  We can remove any such outputs that are
 * not needed by the subquery itself (e.g., as sort/group columns) and do not
 * affect semantics otherwise (e.g., volatile functions can't be removed).
 * This is useful not only because we might be able to remove expensive-to-
 * compute expressions, but because deletion of output columns might allow
 * optimizations such as join removal to occur within the subquery.
 *
 * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
 * FirstLowInvalidHeapAttributeNumber) that we should not remove.  This
 * parameter is modified by the function, so callers must make a copy if they
 * need to use the passed in Bitmapset after calling this function.
 *
 * To avoid affecting column numbering in the targetlist, we don't physically
 * remove unused tlist entries, but rather replace their expressions with NULL
 * constants.  This is implemented by modifying subquery->targetList in place.
 */
static void
remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
							   Bitmapset *extra_used_attrs)
{
	Bitmapset  *attrs_used;
	ListCell   *lc;
	/*
	 * Just point directly to extra_used_attrs. No need to bms_copy as none of
	 * the current callers use the Bitmapset after calling this function.
	 */
	attrs_used = extra_used_attrs;
	/*
	 * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
	 * could update all the child SELECTs' tlists, but it seems not worth the
	 * trouble presently.
	 */
	if (subquery->setOperations)
		return;
	/*
	 * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
	 * time: all its output columns must be used in the distinctClause.
	 */
	if (subquery->distinctClause && !subquery->hasDistinctOn)
		return;
	/*
	 * Collect a bitmap of all the output column numbers used by the upper
	 * query.
	 *
	 * Add all the attributes needed for joins or final output. Note: we must
	 * look at rel's targetlist, not the attr_needed data, because attr_needed
	 * isn't computed for inheritance child rels, cf set_append_rel_size().
	 * (XXX might be worth changing that sometime.)
	 */
	pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
	/* Add all the attributes used by un-pushed-down restriction clauses. */
	foreach(lc, rel->baserestrictinfo)
	{
		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
		pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
	}
	/*
	 * If there's a whole-row reference to the subquery, we can't remove
	 * anything.  (Attno 0, offset into the positive range, denotes the
	 * whole-row Var.)
	 */
	if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
		return;
	/*
	 * Run through the tlist and zap entries we don't need. It's okay to
	 * modify the tlist items in-place because set_subquery_pathlist made a
	 * copy of the subquery.
	 */
	foreach(lc, subquery->targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Node	   *texpr = (Node *) tle->expr;
		/*
		 * If it has a sortgroupref number, it's used in some sort/group
		 * clause so we'd better not remove it. Also, don't remove any
		 * resjunk columns, since their reason for being has nothing to do
		 * with anybody reading the subquery's output. (It's likely that
		 * resjunk columns in a sub-SELECT would always have ressortgroupref
		 * set, but even if they don't, it seems imprudent to remove them.)
		 */
		if (tle->ressortgroupref || tle->resjunk)
			continue;
		/*
		 * If it's used by the upper query, we can't remove it.
		 */
		if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
						  attrs_used))
			continue;
		/*
		 * If it contains a set-returning function, we can't remove it since
		 * that could change the number of rows returned by the subquery.
		 */
		if (subquery->hasTargetSRFs &&
			expression_returns_set(texpr))
			continue;
		/*
		 * If it contains volatile functions, we daren't remove it for fear
		 * that the user is expecting their side-effects to happen.
		 */
		if (contain_volatile_functions(texpr))
			continue;
		/*
		 * OK, we don't need it. Replace the expression with a NULL constant.
		 * Preserve the exposed type of the expression, in case something
		 * looks at the rowtype of the subquery's result.
		 */
		tle->expr = (Expr *) makeNullConst(exprType(texpr),
										   exprTypmod(texpr),
										   exprCollation(texpr));
	}
}
/*
 * create_partial_bitmap_paths
 *		Build partial bitmap heap path for the relation
 *
 * "bitmapqual" is the chosen bitmap index/AND/OR path tree.  If the expected
 * number of fetched heap pages is too small to justify any workers, no
 * partial path is added.
 */
void
create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
							Path *bitmapqual)
{
	int			parallel_workers;
	double		pages_fetched;
	/* Compute heap pages for bitmap heap scan */
	pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
										 NULL, NULL);
	/* Size the worker count from heap pages only (index_pages = -1). */
	parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
											   max_parallel_workers_per_gather);
	if (parallel_workers <= 0)
		return;
	add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
					 bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
}
/*
* Compute the number of parallel workers that should be used to scan a
* relation. We compute the parallel workers based on the size of the heap to
* be scanned and the size of the index to be scanned, then choose a minimum
* of those.
*
* "heap_pages" is the number of pages from the table that we expect to scan, or
* -1 if we don't expect to scan any.
*
* "index_pages" is the number of pages from the index that we expect to scan, or
* -1 if we don't expect to scan any.
*
* "max_workers" is caller's limit on the number of workers. This typically
* comes from a GUC.
*/
int
compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
int max_workers)
{
int parallel_workers = 0;
/*
* If the user has set the parallel_workers reloption, use that; otherwise
* select a default number of workers.
*/
if (rel->rel_parallel_workers != -1)
parallel_workers = rel->rel_parallel_workers;
else
{
/*
* If the number of pages being scanned is insufficient to justify a
* parallel scan, just return zero ... unless it's an inheritance
* child. In that case, we want to generate a parallel path here
* anyway. It might not be worthwhile just for this relation, but
* when combined with all of its inheritance siblings it may well pay
* off.
*/
if (rel->reloptkind == RELOPT_BASEREL &&
((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
(index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
return 0;
if (heap_pages >= 0)
{
int heap_parallel_threshold;
int heap_parallel_workers = 1;
/*
* Select the number of workers based on the log of the size of
* the relation. This probably needs to be a good deal more
* sophisticated, but we need something here for now. Note that
* the upper limit of the min_parallel_table_scan_size GUC is
* chosen to prevent overflow here.
*/
heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
{
heap_parallel_workers++;
heap_parallel_threshold *= 3;
if (heap_parallel_threshold > INT_MAX / 3)
break; /* avoid overflow */
}
parallel_workers = heap_parallel_workers;
}
if (index_pages >= 0)
{
int index_parallel_workers = 1;
int index_parallel_threshold;
/* same calculation as for heap_pages above */
index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
{
index_parallel_workers++;
index_parallel_threshold *= 3;
if (index_parallel_threshold > INT_MAX / 3)
break; /* avoid overflow */
}
if (parallel_workers > 0)
parallel_workers = Min(parallel_workers, index_parallel_workers);
else
parallel_workers = index_parallel_workers;
}
}
/* In no case use more than caller supplied maximum number of workers */
parallel_workers = Min(parallel_workers, max_workers);
return parallel_workers;
}
/*
 * generate_partitionwise_join_paths
 *		Create paths representing partitionwise join for given partitioned
 *		join relation.
 *
 * This must not be called until after we are done adding paths for all
 * child-joins. Otherwise, add_path might delete a path to which some path
 * generated here has a reference.
 */
void
generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
{
	List	   *live_children = NIL;	/* non-dummy child-joins collected */
	int			cnt_parts;
	int			num_parts;
	RelOptInfo **part_rels;
	/* Handle only join relations here. */
	if (!IS_JOIN_REL(rel))
		return;
	/* We've nothing to do if the relation is not partitioned. */
	if (!IS_PARTITIONED_REL(rel))
		return;
	/* The relation should have consider_partitionwise_join set. */
	Assert(rel->consider_partitionwise_join);
	/* Guard against stack overflow due to overly deep partition hierarchy. */
	check_stack_depth();
	num_parts = rel->nparts;
	part_rels = rel->part_rels;
	/* Collect non-dummy child-joins. */
	for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
	{
		RelOptInfo *child_rel = part_rels[cnt_parts];
		/* If it's been pruned entirely, it's certainly dummy. */
		if (child_rel == NULL)
			continue;
		/* Make partitionwise join paths for this partitioned child-join. */
		generate_partitionwise_join_paths(root, child_rel);
		/* If we failed to make any path for this child, we must give up. */
		if (child_rel->pathlist == NIL)
		{
			/*
			 * Mark the parent joinrel as unpartitioned so that later
			 * functions treat it correctly.
			 */
			rel->nparts = 0;
			return;
		}
		/* Else, identify the cheapest path for it. */
		set_cheapest(child_rel);
		/* Dummy children need not be scanned, so ignore those. */
		if (IS_DUMMY_REL(child_rel))
			continue;
		/*
		 * Except for the topmost scan/join rel, consider generating partial
		 * aggregation paths for the grouped relation on top of the paths of
		 * this partitioned child-join. After that, we're done creating paths
		 * for the grouped relation, so run set_cheapest().
		 */
		if (child_rel->grouped_rel != NULL &&
			!bms_equal(IS_OTHER_REL(rel) ?
					   rel->top_parent_relids : rel->relids,
					   root->all_query_rels))
		{
			RelOptInfo *grouped_rel = child_rel->grouped_rel;
			Assert(IS_GROUPED_REL(grouped_rel));
			generate_grouped_paths(root, grouped_rel, child_rel);
			set_cheapest(grouped_rel);
		}
#ifdef OPTIMIZER_DEBUG
		pprint(child_rel);
#endif
		live_children = lappend(live_children, child_rel);
	}
	/* If all child-joins are dummy, parent join is also dummy. */
	if (!live_children)
	{
		mark_dummy_rel(rel);
		return;
	}
	/* Build additional paths for this rel from child-join paths. */
	add_paths_to_append_rel(root, rel, live_children);
	list_free(live_children);
}
from PySide import QtGui, QtCore
class Tree(QtGui.QTreeView):
    """Directory tree view that reports file selections to the main window."""

    def __init__(self, parent=None):
        super(Tree, self).__init__(parent)
        self.dir_path = None
        # TODO: resize tree according to contents.
        # For now, max width is hardcoded.
        self.setMaximumWidth(180)

    def load_from_dir(self, dir_path):
        """ Load directory containing file into the tree. """
        if dir_path == self.dir_path:
            # Same directory as before: skip redrawing.
            return
        self.dir_path = dir_path
        # Back the view with a filesystem model rooted at the directory.
        fs_model = QtGui.QFileSystemModel()
        fs_model.setRootPath(dir_path)
        self.setModel(fs_model)
        # Point the view's root at the model's root directory.
        self.setRootIndex(fs_model.index(fs_model.rootPath()))
        # Display tree cleanly
        self.hide_unwanted_info()

    def hide_unwanted_info(self):
        """ Hides unneeded columns and header. """
        # Columns 1-3 hold size/type/date details we don't display.
        for column in (1, 2, 3):
            self.hideColumn(column)
        self.setHeaderHidden(True)

    def selectionChanged(self, selected, deselected):
        """
        Event handler for selection changes.
        Triggers a fileChanged event in parent window.
        """
        super(Tree, self).selectionChanged(selected, deselected)
        indexes = selected.indexes()
        if not indexes:
            return
        # Forward the newly selected filename to the main window
        # (parent of parent in the widget hierarchy).
        new_filename = self.model().data(indexes[0])
        self.parent().parent().handleFileChanged(self.dir_path, new_filename)
"""
The :mod:`websockets.client` module defines a simple WebSocket client API.
"""
__all__ = ['connect', 'WebSocketClientProtocol']
import asyncio
from .exceptions import InvalidHandshake
from .handshake import build_request, check_response
from .http import read_response, USER_AGENT
from .protocol import WebSocketCommonProtocol
from .uri import parse_uri
class WebSocketClientProtocol(WebSocketCommonProtocol):
    """
    Complete WebSocket client implementation as an asyncio protocol.

    This class inherits most of its methods from
    :class:`~websockets.protocol.WebSocketCommonProtocol`; it only adds
    the client side of the opening handshake.
    """
    # Distinguishes client vs. server behavior in the common protocol.
    is_client = True
    # Connection starts in the CONNECTING state; handshake() moves it to OPEN.
    state = 'CONNECTING'
    @asyncio.coroutine
    def handshake(self, wsuri, origin=None):
        """
        Perform the client side of the opening handshake.

        ``wsuri`` is the parsed WebSocket URI (as returned by
        :func:`~websockets.uri.parse_uri`).
        If provided, ``origin`` sets the HTTP Origin header.

        Raises :exc:`~websockets.exceptions.InvalidHandshake` if the
        server's response is malformed, has a non-101 status code, or
        fails the Sec-WebSocket-Accept check.
        """
        headers = []
        set_header = lambda k, v: headers.append((k, v))
        # Omit the port from Host when it is the scheme's default port.
        if wsuri.port == (443 if wsuri.secure else 80):  # pragma: no cover
            set_header('Host', wsuri.host)
        else:
            set_header('Host', '{}:{}'.format(wsuri.host, wsuri.port))
        if origin is not None:
            set_header('Origin', origin)
        set_header('User-Agent', USER_AGENT)
        # build_request adds the Upgrade headers and returns the nonce key
        # used later to validate the server's response.
        key = build_request(set_header)
        self.raw_request_headers = headers
        # Send handshake request. Since the URI and the headers only contain
        # ASCII characters, we can keep this simple.
        request = ['GET %s HTTP/1.1' % wsuri.resource_name]
        request.extend('{}: {}'.format(k, v) for k, v in headers)
        request.append('\r\n')
        request = '\r\n'.join(request).encode()
        self.writer.write(request)
        # Read handshake response.
        try:
            status_code, headers = yield from read_response(self.reader)
        except Exception as exc:
            raise InvalidHandshake("Malformed HTTP message") from exc
        if status_code != 101:
            raise InvalidHandshake("Bad status code: {}".format(status_code))
        self.raw_response_headers = list(headers.raw_items())
        # check_response raises InvalidHandshake on a bad accept header.
        get_header = lambda k: headers.get(k, '')
        check_response(get_header, key)
        self.state = 'OPEN'
        self.opening_handshake.set_result(True)
@asyncio.coroutine
def connect(uri, *,
            klass=WebSocketClientProtocol, origin=None, **kwds):
    """
    Connect to the WebSocket server at ``uri``.

    This coroutine is a thin wrapper around the event loop's
    ``create_connection`` method; extra keyword arguments are passed
    through to it.  It accepts an ``origin`` keyword argument to set the
    Origin HTTP header.

    It returns a :class:`~websockets.client.WebSocketClientProtocol`
    which can then be used to send and receive messages.

    It raises :exc:`~websockets.uri.InvalidURI` if ``uri`` is invalid and
    :exc:`~websockets.handshake.InvalidHandshake` if the handshake fails.

    Clients shouldn't close the WebSocket connection.  Instead, they
    should wait until the server performs the closing handshake by
    yielding from the protocol's :attr:`worker` attribute.

    :func:`connect` implements the sequence called "Establish a WebSocket
    Connection" in RFC 6455, except for the requirement that "there MUST
    be no more than one connection in a CONNECTING state."
    """
    ws_uri = parse_uri(uri)

    # Enable TLS automatically for wss://; reject an explicit SSL context
    # on a plain ws:// URI since that is almost certainly a mistake.
    if ws_uri.secure:
        kwds.setdefault('ssl', True)
    elif 'ssl' in kwds:
        raise ValueError("connect() received a SSL context for a ws:// URI. "
                         "Use a wss:// URI to enable TLS.")

    def factory():
        return klass(host=ws_uri.host, port=ws_uri.port, secure=ws_uri.secure)

    loop = asyncio.get_event_loop()
    transport, protocol = yield from loop.create_connection(
        factory, ws_uri.host, ws_uri.port, **kwds)

    # If the opening handshake fails, tear the connection down before
    # propagating the error.
    try:
        yield from protocol.handshake(ws_uri, origin=origin)
    except Exception:
        protocol.writer.close()
        raise
    return protocol
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Each active thread has an ThreadIdentity that may represent the thread in
// various level interfaces. ThreadIdentity objects are never deallocated.
// When a thread terminates, its ThreadIdentity object may be reused for a
// thread created later.
#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
#ifndef _WIN32
#include <pthread.h>
// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
// supported.
#include <unistd.h>
#endif
#include <atomic>
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
struct SynchLocksHeld;
struct SynchWaitParams;
namespace base_internal {
class SpinLock;
struct ThreadIdentity;
// Used by the implementation of absl::Mutex and absl::CondVar.
// Used by the implementation of absl::Mutex and absl::CondVar.
struct PerThreadSynch {
  // The internal representation of absl::Mutex and absl::CondVar rely
  // on the alignment of PerThreadSynch. Both store the address of the
  // PerThreadSynch in the high-order bits of their internal state,
  // which means the low kLowZeroBits of the address of PerThreadSynch
  // must be zero.
  static constexpr int kLowZeroBits = 8;
  static constexpr int kAlignment = 1 << kLowZeroBits;
  // Returns the associated ThreadIdentity.
  // This can be implemented as a cast because we guarantee
  // PerThreadSynch is the first element of ThreadIdentity.
  ThreadIdentity* thread_identity() {
    return reinterpret_cast<ThreadIdentity*>(this);
  }
  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
                         // up to and including "skip" have same
                         // condition as this, and will be woken later
  bool may_skip;         // if false while on mutex queue, a mutex unlocker
                         // is using this PerThreadSynch as a terminator. Its
                         // skip field must not be filled in because the loop
                         // might then skip over the terminator.
  bool wake;             // This thread is to be woken from a Mutex.
  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
  //
  // The value of "x->cond_waiter" is meaningless if "x" is not on a
  // Mutex waiter list.
  bool cond_waiter;
  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
                         // true if UnlockSlow could be searching
                         // for a waiter to wake. Used for an optimization
                         // in Enqueue(). true is always a valid value.
                         // Can be reset to false when the unlocker or any
                         // writer releases the lock, or a reader fully
                         // releases the lock. It may not be set to false
                         // by a reader that decrements the count to
                         // non-zero. protected by mutex spinlock
  bool suppress_fatal_errors;  // If true, try to proceed even in the face
                               // of broken invariants. This is used within
                               // fatal signal handlers to improve the
                               // chances of debug logging information being
                               // output successfully.
  int priority;  // Priority of thread (updated every so often).
  // State values:
  //   kAvailable: This PerThreadSynch is available.
  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
  //            Mutex or CondVar wait list.
  //
  // Transitions from kQueued to kAvailable require a release
  // barrier. This is needed as a waiter may use "state" to
  // independently observe that it's no longer queued.
  //
  // Transitions from kAvailable to kQueued require no barrier, they
  // are externally ordered by the Mutex.
  enum State { kAvailable, kQueued };
  std::atomic<State> state;
  // The wait parameters of the current wait. waitp is null if the
  // thread is not waiting. Transitions from null to non-null must
  // occur before the enqueue commit point (state = kQueued in
  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
  // null must occur after the wait is finished (state = kAvailable in
  // Mutex::Block() and CondVar::WaitCommon()). This field may be
  // changed only by the thread that describes this PerThreadSynch. A
  // special case is Fer(), which calls Enqueue() on another thread,
  // but with an identical SynchWaitParams pointer, thus leaving the
  // pointer unchanged.
  SynchWaitParams* waitp;
  intptr_t readers;  // Number of readers in mutex.
  // When priority will next be read (cycles).
  int64_t next_priority_read_cycles;
  // Locks held; used during deadlock detection.
  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
  SynchLocksHeld* all_locks;
};
// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
//
// NOTE: The layout of fields in this structure is critical, please do not
// add, remove, or modify the field placements without fully auditing the
// layout.
struct ThreadIdentity {
  // Must be the first member. The Mutex implementation requires that
  // the PerThreadSynch object associated with each thread is
  // PerThreadSynch::kAlignment aligned. We provide this alignment on
  // ThreadIdentity itself.
  PerThreadSynch per_thread_synch;
  // Private: Reserved for absl::synchronization_internal::Waiter.
  struct WaiterState {
    alignas(void*) char data[256];
  } waiter_state;
  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
  std::atomic<int>* blocked_count_ptr;
  // The following variables are mostly read/written just by the
  // thread itself. The only exception is that these are read by
  // a ticker thread as a hint.
  std::atomic<int> ticker;      // Tick counter, incremented once per second.
  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
  std::atomic<bool> is_idle;    // Has thread become idle yet?
  // NOTE(review): undocumented in the original. Given the file header's
  // statement that ThreadIdentity objects are never deallocated and may be
  // reused for later threads, this presumably links identities in a
  // reuse/free list -- confirm against the allocator in the .cc file.
  ThreadIdentity* next;
};
// Returns the ThreadIdentity object representing the calling thread; guaranteed
// to be unique for its lifetime. The returned object will remain valid for the
// program's lifetime; although it may be re-assigned to a subsequent thread.
// If one does not exist, return nullptr instead.
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
// is handled internally within tcmalloc's initialization already. Note that
// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
// on Apple platforms. Whatever function is calling your MallocHooks will need
// to watch for recursion on Apple platforms.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
ThreadIdentity* CurrentThreadIdentityIfPresent();
using ThreadIdentityReclaimerFunction = void (*)(void*);
// Sets the current thread identity to the given value. 'reclaimer' is a
// pointer to the global function for cleaning up instances on thread
// destruction.
void SetCurrentThreadIdentity(ThreadIdentity* identity,
ThreadIdentityReclaimerFunction reclaimer);
// Removes the currently associated ThreadIdentity from the running thread.
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
// from that function.
void ClearCurrentThreadIdentity();
// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
// index>
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
// Note: Current default for production systems.
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
#else
#define ABSL_THREAD_IDENTITY_MODE \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#endif
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#if ABSL_PER_THREAD_TLS
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
thread_identity_ptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL)
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#else
#error Thread-local storage not detected on this platform
#endif
// thread_local variables cannot be in headers exposed by DLLs or in certain
// build configurations on Apple platforms. However, it is important for
// performance reasons in general that `CurrentThreadIdentityIfPresent` be
// inlined. In the other cases we opt to have the function not be inlined. Note
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
// this entire inline definition.
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
!defined(ABSL_CONSUME_DLL)
#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
#endif
#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
return thread_identity_ptr;
}
#endif
#elif ABSL_THREAD_IDENTITY_MODE != \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error Unknown ABSL_THREAD_IDENTITY_MODE
#endif
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/base/internal/thread_identity.h |
from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
register = template.Library()
@register.filter
def admin_urlname(value, arg):
    """Build the reversible admin URL name for the given model opts and view."""
    return 'admin:{0}_{1}_{2}'.format(value.app_label, value.model_name, arg)
@register.filter
def admin_urlquote(value):
    """Quote a value (e.g. a primary key) for safe use in an admin URL."""
    return quote(value)
@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
    """
    Return ``url`` with the admin's preserved changelist filters (and the
    popup/to-field markers, when requested) merged into its query string.

    Query parameters already present in ``url`` take precedence over the
    preserved filters.
    """
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')
    # Split the URL so its query string (index 4) can be rewritten.
    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = dict()
    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))
        # Rebuild the URL path without the script prefix so it can be resolved.
        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            # When linking back to the changelist itself, unwrap the nested
            # '_changelist_filters' parameter into plain query parameters.
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))
        merged_qs.update(preserved_filters)
    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field
    # Original query parameters win over preserved filters on key collisions.
    merged_qs.update(parsed_qs)
    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
"""
Tests for kqueue wrapper.
"""
import errno
import os
import select
import socket
import sys
import time
import unittest
from test import support
if not hasattr(select, "kqueue"):
raise unittest.SkipTest("test works only on BSD")
class TestKQueue(unittest.TestCase):
def test_create_queue(self):
kq = select.kqueue()
self.assertTrue(kq.fileno() > 0, kq.fileno())
self.assertTrue(not kq.closed)
kq.close()
self.assertTrue(kq.closed)
self.assertRaises(ValueError, kq.fileno)
    def test_create_event(self):
        """kevent construction: defaults, per-argument overrides, comparisons,
        and large (unsigned) ident/udata values."""
        from operator import lt, le, gt, ge
        fd = os.open(os.devnull, os.O_WRONLY)
        self.addCleanup(os.close, fd)
        # Default construction: READ filter, ADD flag, zeroed remaining fields.
        ev = select.kevent(fd)
        other = select.kevent(1000)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_READ)
        self.assertEqual(ev.flags, select.KQ_EV_ADD)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
        # Ordering comparisons work between kevents but raise TypeError
        # against non-kevent operands.
        self.assertTrue(ev < other)
        self.assertTrue(other >= ev)
        for op in lt, le, gt, ge:
            self.assertRaises(TypeError, op, ev, None)
            self.assertRaises(TypeError, op, ev, 1)
            self.assertRaises(TypeError, op, ev, "ev")
        # Explicit filter argument; flags still default to KQ_EV_ADD.
        ev = select.kevent(fd, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.flags, select.KQ_EV_ADD)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
        # Explicit flags argument replaces the KQ_EV_ADD default.
        ev = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ONESHOT)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.flags, select.KQ_EV_ONESHOT)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
        # All six fields set positionally.
        ev = select.kevent(1, 2, 3, 4, 5, 6)
        self.assertEqual(ev.ident, 1)
        self.assertEqual(ev.filter, 2)
        self.assertEqual(ev.flags, 3)
        self.assertEqual(ev.fflags, 4)
        self.assertEqual(ev.data, 5)
        self.assertEqual(ev.udata, 6)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
        # ident and udata accept values beyond sys.maxsize (unsigned width).
        bignum = sys.maxsize * 2 + 1
        ev = select.kevent(bignum, 1, 2, 3, sys.maxsize, bignum)
        self.assertEqual(ev.ident, bignum)
        self.assertEqual(ev.filter, 1)
        self.assertEqual(ev.flags, 2)
        self.assertEqual(ev.fflags, 3)
        self.assertEqual(ev.data, sys.maxsize)
        self.assertEqual(ev.udata, bignum)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
def test_queue_event(self):
    """End-to-end kqueue test: register read/write filters for both ends
    of a TCP connection and verify the events reported by control().
    """
    serverSocket = socket.socket()
    serverSocket.bind(('127.0.0.1', 0))
    serverSocket.listen(1)
    client = socket.socket()
    client.setblocking(False)
    # Non-blocking connect normally fails with EINPROGRESS; FreeBSD may
    # complete a loopback connect immediately, so success is tolerated.
    try:
        client.connect(('127.0.0.1', serverSocket.getsockname()[1]))
    except socket.error as e:
        self.assertEqual(e.args[0], errno.EINPROGRESS)
    else:
        #raise AssertionError("Connect should have raised EINPROGRESS")
        pass # FreeBSD doesn't raise an exception here
    server, addr = serverSocket.accept()
    # Darwin echoes KQ_EV_ADD|KQ_EV_ENABLE back in the reported flags;
    # other BSDs report 0 — normalize expectations per platform.
    if sys.platform.startswith("darwin"):
        flags = select.KQ_EV_ADD | select.KQ_EV_ENABLE
    else:
        flags = 0
    kq = select.kqueue()
    # fromfd() wraps the same kernel queue in a second Python object, so
    # registrations through kq2 are visible when polling kq.
    kq2 = select.kqueue.fromfd(kq.fileno())
    ev = select.kevent(server.fileno(),
                       select.KQ_FILTER_WRITE,
                       select.KQ_EV_ADD | select.KQ_EV_ENABLE)
    kq.control([ev], 0)
    ev = select.kevent(server.fileno(),
                       select.KQ_FILTER_READ,
                       select.KQ_EV_ADD | select.KQ_EV_ENABLE)
    kq.control([ev], 0)
    ev = select.kevent(client.fileno(),
                       select.KQ_FILTER_WRITE,
                       select.KQ_EV_ADD | select.KQ_EV_ENABLE)
    kq2.control([ev], 0)
    ev = select.kevent(client.fileno(),
                       select.KQ_FILTER_READ,
                       select.KQ_EV_ADD | select.KQ_EV_ENABLE)
    kq2.control([ev], 0)
    # Before any data flows, only the WRITE filters should fire.
    events = kq.control(None, 4, 1)
    events = [(e.ident, e.filter, e.flags) for e in events]
    events.sort()
    self.assertEqual(events, [
        (client.fileno(), select.KQ_FILTER_WRITE, flags),
        (server.fileno(), select.KQ_FILTER_WRITE, flags)])
    client.send(b"Hello!")
    server.send(b"world!!!")
    # We may need to call it several times
    for i in range(10):
        events = kq.control(None, 4, 1)
        if len(events) == 4:
            break
        time.sleep(1.0)
    else:
        self.fail('timeout waiting for event notifications')
    events = [(e.ident, e.filter, e.flags) for e in events]
    events.sort()
    # With data pending in both directions all four filters report.
    self.assertEqual(events, [
        (client.fileno(), select.KQ_FILTER_WRITE, flags),
        (client.fileno(), select.KQ_FILTER_READ, flags),
        (server.fileno(), select.KQ_FILTER_WRITE, flags),
        (server.fileno(), select.KQ_FILTER_READ, flags)])
    # Remove completely client, and server read part
    ev = select.kevent(client.fileno(),
                       select.KQ_FILTER_WRITE,
                       select.KQ_EV_DELETE)
    kq.control([ev], 0)
    ev = select.kevent(client.fileno(),
                       select.KQ_FILTER_READ,
                       select.KQ_EV_DELETE)
    kq.control([ev], 0)
    ev = select.kevent(server.fileno(),
                       select.KQ_FILTER_READ,
                       select.KQ_EV_DELETE)
    kq.control([ev], 0, 0)
    # Only the remaining server WRITE registration should still report.
    events = kq.control([], 4, 0.99)
    events = [(e.ident, e.filter, e.flags) for e in events]
    events.sort()
    self.assertEqual(events, [
        (server.fileno(), select.KQ_FILTER_WRITE, flags)])
    client.close()
    server.close()
    serverSocket.close()
def testPair(self):
    """One end of a socketpair becomes readable after the peer writes."""
    queue = select.kqueue()
    left, right = socket.socketpair()
    left.send(b'foo')
    read_flags = select.KQ_EV_ADD | select.KQ_EV_ENABLE
    watches = [
        select.kevent(left, select.KQ_FILTER_READ, read_flags),
        select.kevent(right, select.KQ_FILTER_READ, read_flags),
    ]
    reported = queue.control(watches, 1, 1)
    self.assertTrue(reported)
    first = reported[0]
    self.assertFalse(first.flags & select.KQ_EV_ERROR)
    # For a read filter, ev.data is the number of bytes available.
    self.assertEqual(right.recv(first.data), b'foo')
    left.close()
    right.close()
    queue.close()
def test_main():
    # 'support' comes from the CPython test package (imported outside
    # this excerpt); it runs the TestKQueue case via unittest.
    support.run_unittest(TestKQueue)

if __name__ == "__main__":
    test_main()
#################################### IMPORTS ###################################
# Standard pygame test-suite bootstrap: when run as a script, make the
# package's parent directory importable; when imported, detect whether we
# live inside the installed pygame.tests package so that test_utils can be
# imported from the right place.
if __name__ == '__main__':
    import sys
    import os
    pkg_dir = os.path.split(os.path.abspath(__file__))[0]
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        sys.path.insert(0, parent_dir)
else:
    is_pygame_pkg = __name__.startswith('pygame.tests.')

import unittest
if is_pygame_pkg:
    from pygame.tests.test_utils import question, prompt
else:
    from test.test_utils import question, prompt

import pygame

################################################################################
class CdromModuleTest(unittest.TestCase):
    # Placeholder tests for the module-level pygame.cdrom API.  Each
    # todo_* method deliberately fails until a real test is written; the
    # comments summarise the documented behaviour that should be covered.

    def todo_test_CD(self):
        # pygame.cdrom.CD(id): return CD -- wraps one drive, identified
        # by an integer id below pygame.cdrom.get_count().  An
        # uninitialized CD only supports get_id()/get_name(), and several
        # CD objects may safely refer to the same physical drive.
        self.fail()

    def todo_test_get_count(self):
        # pygame.cdrom.get_count(): return count -- number of cd drives
        # on the system (0 when there are none); valid CD ids must be
        # smaller than this count.
        self.fail()

    def todo_test_get_init(self):
        # pygame.cdrom.get_init(): return bool -- whether the cdrom
        # module itself is initialized; distinct from the per-drive
        # CD.init() state.
        self.fail()

    def todo_test_init(self):
        # pygame.cdrom.init(): return None -- scan the system for CD
        # devices.  Must run before other functions work; happens
        # automatically via pygame.init() and is safe to call repeatedly.
        self.fail()

    def todo_test_quit(self):
        # pygame.cdrom.quit(): return None -- uninitialize the module;
        # existing CD objects stop working afterwards.  Safe to call more
        # than once.
        self.fail()
class CDTypeTest(unittest.TestCase):
    """Tests for the pygame.cdrom.CD drive object.

    NOTE(review): the two active tests are interactive -- they eject the
    tray and ask the operator to confirm via question()/prompt() -- so
    they are only meaningful on a machine with a physical CD drive.
    """

    def setUp(self):
        pygame.cdrom.init()
        #TODO:
        try:
            self.cd = pygame.cdrom.CD(0)
        except pygame.error:
            # No drive 0 present; each test guards on self.cd being None.
            self.cd = None

    def tearDown(self):
        pygame.cdrom.quit()

    def test_1_eject(self):
        # __doc__ (as of 2008-07-02) for pygame.cdrom.CD.eject:
        # CD.eject(): return None
        # eject or open the cdrom drive
        # should raise if cd object not initialized
        if self.cd:
            self.cd.init()
            self.cd.eject()
            self.assert_(question('Did the cd eject?'))
            prompt("Please close the cd drive")

    def test_2_get_name(self):
        # __doc__ (as of 2008-07-02) for pygame.cdrom.CD.get_name:
        # CD.get_name(): return name
        # the system name of the cdrom drive
        if self.cd:
            cd_name = self.cd.get_name()
            self.assert_ (
                question('Is %s the correct name for the cd drive?' % cd_name)
            )

    def todo_test_get_all(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_all:
        # CD.get_all(): return [(audio, start, end, length), ...]
        # get all track information
        #
        # Return a list with information for every track on the cdrom. The
        # information consists of a tuple with four values. The audio value is
        # True if the track contains audio data. The start, end, and length
        # values are floating point numbers in seconds. Start and end
        # represent absolute times on the entire disc.
        #
        self.fail()

    def todo_test_get_busy(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_busy:
        # CD.get_busy(): return bool
        # true if the drive is playing audio
        #
        # Returns True if the drive busy playing back audio.
        self.fail()

    def todo_test_get_current(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_current:
        # CD.get_current(): return track, seconds
        # the current audio playback position
        #
        # Returns both the current track and time of that track. This method
        # works when the drive is either playing or paused.
        #
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_get_empty(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_empty:
        # CD.get_empty(): return bool
        # False if a cdrom is in the drive
        #
        # Return False if there is a cdrom currently in the drive. If the
        # drive is empty this will return True.
        #
        self.fail()

    def todo_test_get_id(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_id:
        # CD.get_init(): return bool
        # true if this cd device initialized
        #
        # Returns the integer id that was used to create the CD instance. This
        # method can work on an uninitialized CD.
        #
        self.fail()

    def todo_test_get_init(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_init:
        # CD.get_init(): return bool
        # true if this cd device initialized
        #
        # Test if this CDROM device is initialized. This is different than the
        # pygame.cdrom.init() since each drive must also be initialized
        # individually.
        #
        self.fail()

    def todo_test_get_numtracks(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_numtracks:
        # CD.get_numtracks(): return count
        # the number of tracks on the cdrom
        #
        # Return the number of tracks on the cdrom in the drive. This will
        # return zero of the drive is empty or has no tracks.
        #
        self.fail()

    def todo_test_get_paused(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_paused:
        # CD.get_paused(): return bool
        # true if the drive is paused
        #
        # Returns True if the drive is currently paused.
        self.fail()

    def todo_test_get_track_audio(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_audio:
        # CD.get_track_audio(track): return bool
        # true if the cdrom track has audio data
        #
        # Determine if a track on a cdrom contains audio data. You can also
        # call CD.num_tracks() and CD.get_all() to determine more information
        # about the cdrom.
        #
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_get_track_length(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_length:
        # CD.get_track_length(track): return seconds
        # length of a cdrom track
        #
        # Return a floating point value in seconds of the length of the cdrom track.
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_get_track_start(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_start:
        # CD.get_track_start(track): return seconds
        # start time of a cdrom track
        #
        # Return the absolute time in seconds where at start of the cdrom track.
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_init(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.init:
        # CD.init(): return None
        # initialize a cdrom drive for use
        #
        # Initialize the cdrom drive for use. The drive must be initialized
        # for most CD methods to work. Even if the rest of pygame has been
        # initialized.
        #
        # There may be a brief pause while the drive is initialized. Avoid
        # CD.init() if the program should not stop for a second or two.
        #
        self.fail()

    def todo_test_pause(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.pause:
        # CD.pause(): return None
        # temporarily stop audio playback
        #
        # Temporarily stop audio playback on the CD. The playback can be
        # resumed at the same point with the CD.resume() method. If the CD is
        # not playing this method does nothing.
        #
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_play(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.play:
        # CD.init(): return None
        # initialize a cdrom drive for use
        #
        # Playback audio from an audio cdrom in the drive. Besides the track
        # number argument, you can also pass a starting and ending time for
        # playback. The start and end time are in seconds, and can limit the
        # section of an audio track played.
        #
        # If you pass a start time but no end, the audio will play to the end
        # of the track. If you pass a start time and 'None' for the end time,
        # the audio will play to the end of the entire disc.
        #
        # See the CD.get_numtracks() and CD.get_track_audio() to find tracks to playback.
        # Note, track 0 is the first track on the CD. Track numbers start at zero.
        self.fail()

    def todo_test_quit(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.quit:
        # CD.quit(): return None
        # uninitialize a cdrom drive for use
        #
        # Uninitialize a drive for use. Call this when your program will not
        # be accessing the drive for awhile.
        #
        self.fail()

    def todo_test_resume(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.resume:
        # CD.resume(): return None
        # unpause audio playback
        #
        # Unpause a paused CD. If the CD is not paused or already playing,
        # this method does nothing.
        #
        self.fail()

    def todo_test_stop(self):
        # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.stop:
        # CD.stop(): return None
        # stop audio playback
        #
        # Stops playback of audio from the cdrom. This will also lose the
        # current playback position. This method does nothing if the drive
        # isn't already playing audio.
        #
        self.fail()
################################################################################

# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
"""
Tools for memoization of function results.
"""
from functools import wraps
from six import iteritems
from weakref import WeakKeyDictionary
class lazyval(object):
    """Descriptor that defers computing an attribute until first access
    and memoizes the result per instance.

    The cache keeps instances weakly, so memoized values disappear when
    the owning object is garbage collected.  Accessing the attribute on
    the class itself (rather than on an instance) returns the descriptor.

    Example
    -------
    >>> from zipline.utils.memoize import lazyval
    >>> class C(object):
    ...     def __init__(self):
    ...         self.count = 0
    ...     @lazyval
    ...     def val(self):
    ...         self.count += 1
    ...         return "val"
    ...
    >>> c = C()
    >>> c.count
    0
    >>> c.val, c.count
    ('val', 1)
    >>> c.val, c.count
    ('val', 1)
    """
    def __init__(self, get):
        self._get = get
        self._cache = WeakKeyDictionary()

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: expose the descriptor itself.
            return self
        cache = self._cache
        if instance not in cache:
            cache[instance] = self._get(instance)
        return cache[instance]
def remember_last(f):
    """
    Decorator that remembers the last computed value of a function and doesn't
    recompute it when called with the same inputs multiple times.

    Parameters
    ----------
    f : The function to be memoized. All arguments to f should be hashable.

    Example
    -------
    >>> counter = 0
    >>> @remember_last
    ... def foo(x):
    ...     global counter
    ...     counter += 1
    ...     return x, counter
    >>> foo(1)
    (1, 1)
    >>> foo(1)
    (1, 1)
    >>> foo(0)
    (0, 2)
    >>> foo(1)
    (1, 3)

    Notes
    -----
    This decorator is equivalent to `lru_cache(1)` in Python 3, but with less
    bells and whistles for handling things like threadsafety.  If we ever
    decide we need such bells and whistles, we should just make functools32 a
    dependency.
    """
    # This needs to be a mutable data structure so we can change it from inside
    # the wrapper. In pure Python 3, we'd use the nonlocal keyword for this.
    _previous = [None, None]
    KEY, VALUE = 0, 1

    # Unique sentinel separating positional args from keyword items in the
    # cache key, so f(1, ('x', 2)) can never collide with f(1, x=2).
    _kwd_mark = object()

    @wraps(f)
    def memoized_f(*args, **kwds):
        # Hashing logic adapted from functools32.lru_cache.
        key = args
        if kwds:
            # BUG FIX: the sentinel must be wrapped in a 1-tuple before
            # concatenation -- `tuple + object` raised TypeError, which made
            # every call that used keyword arguments blow up.  kwds.items()
            # (sorted for a deterministic key) works on Python 2 and 3 alike.
            key += (_kwd_mark,) + tuple(sorted(kwds.items()))
        key_hash = hash(key)
        if key_hash != _previous[KEY]:
            _previous[VALUE] = f(*args, **kwds)
            _previous[KEY] = key_hash
        return _previous[VALUE]
    return memoized_f
/* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/**
@file include/caching_sha2_passwordopt-longopts.h
*/
/* NOTE(review): this fragment is textually #include'd inside a my_option
   array initializer by the client tools; the opt_server_public_key /
   opt_get_server_public_key variables are declared by the including file. */
{"server-public-key-path",
 OPT_SERVER_PUBLIC_KEY,
 "File path to the server public RSA key in PEM format.",
 &opt_server_public_key,
 &opt_server_public_key,
 nullptr,
 GET_STR,      /* string-valued option */
 REQUIRED_ARG, /* requires --server-public-key-path=<path> */
 0,
 0,
 0,
 nullptr,
 0,
 nullptr},
{"get-server-public-key",
 0,
 "Get server public key",
 &opt_get_server_public_key,
 &opt_get_server_public_key,
 nullptr,
 GET_BOOL, /* boolean flag */
 NO_ARG,   /* takes no argument */
 0,
 0,
 0,
 nullptr,
 0,
 nullptr},
# Gate job that verifies release prerequisites (Stainless and PyPI
# credentials) before a publish can proceed.
name: Release Doctor
on:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  release_doctor:
    name: release doctor
    runs-on: ubuntu-latest
    environment: publish
    # Only run on the upstream repository, for pushes / manual dispatch /
    # release-please branches.
    if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')

    steps:
      - uses: actions/checkout@v6

      - name: Check release environment
        run: |
          bash ./bin/check-release-environment
        env:
          STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }}
          PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
# -*- coding: utf-8 -*-
import re
import os
import logging
from lxml import etree, html
from eduskunta.importer import Importer
from parliament.models.member import Seat, MemberSeat, Member
class SeatImporter(Importer):
    """Imports plenary-hall seat coordinates and MP seat assignments from
    tab-separated data files shipped next to this module.

    NOTE(review): Python 2 codebase -- lines are byte strings decoded
    per-line; `self.replace` and `self.logger` presumably come from the
    Importer base class (outside this excerpt) -- confirm.
    """

    SEAT_FILENAME = 'seats.txt'      # rows: row, seat, x, y
    MPSEAT_FILENAME = 'mp-seats.txt' # rows: row, seat, member name, begin, end

    def import_seats(self):
        # Data files live alongside this source file.
        path = os.path.dirname(os.path.realpath(__file__))
        f = open(os.path.join(path, self.SEAT_FILENAME))
        count = 0
        for line in f.readlines():
            line = line.decode('utf8').strip()
            # Skip blank lines and '#' comments in the data file.
            if not line or line[0] == '#':
                continue
            (row, seat, x, y) = line.split('\t')
            try:
                seat = Seat.objects.get(row=row, seat=seat)
                # Existing seat: only overwrite when replacing.
                if not self.replace:
                    continue
            except Seat.DoesNotExist:
                seat = Seat(row=row, seat=seat)
            seat.x = x
            seat.y = y
            seat.save()
            count += 1
        self.logger.info("%d seat coordinates imported" % count)
        f.close()
        count = 0
        f = open(os.path.join(path, self.MPSEAT_FILENAME))
        for line in f.readlines():
            line = line.decode('utf8').strip()
            if not line or line[0] == '#':
                continue
            (row, seat, mp, begin, end) = line.split('\t')
            if end == '-':
                # '-' marks an open-ended seating period.
                end = None
            mp = Member.objects.get(name=mp)
            try:
                mps = MemberSeat.objects.get(member=mp, begin=begin)
                if not self.replace:
                    continue
            except MemberSeat.DoesNotExist:
                mps = MemberSeat(member=mp, begin=begin)
            seat = Seat.objects.get(row=row, seat=seat)
            mps.seat = seat
            mps.end = end
            mps.save()
            count += 1
        self.logger.info("%d MP seatings imported" % count)
        f.close()
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This test exercises the addition operator of Action objects.
# Using Environment.Prepend() and Environment.Append(), you should be
# able to add new actions to existing ones, effectively adding steps
# to a build process.
__revision__ = "test/Actions/append.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

import os
import stat
import sys

import TestSCons

# Executable suffix differs per platform.
if sys.platform == 'win32':
    _exe = '.exe'
else:
    _exe = ''

test = TestSCons.TestSCons()

test.write('foo.c', r"""
#include <stdio.h>
int main(void)
{
printf("Foo\n");
return 0;
}
""")

# The SConstruct prepends a Python action (writes the target and
# before.txt) and appends another (copies the target to after<exe>) around
# the normal link command, exercising Action addition via Prepend/Append.
test.write('SConstruct', """
env=Environment()
def before(env, target, source):
    f=open(str(target[0]), "wb")
    f.write("Foo\\n")
    f.close()
    f=open("before.txt", "wb")
    f.write("Bar\\n")
    f.close()

def after(env, target, source):
    fin = open(str(target[0]), "rb")
    fout = open("after%s", "wb")
    fout.write(fin.read())
    fout.close()
    fin.close()

env.Prepend(LINKCOM=Action(before))
env.Append(LINKCOM=Action(after))
env.Program(source='foo.c', target='foo')
""" % _exe)

after_exe = test.workpath('after' + _exe)

test.run(arguments='.')

# The prepended action must have run first and produced before.txt.
test.fail_test(open('before.txt', 'rb').read() != "Bar\n")

# The copied binary loses its execute bit; restore it before running.
os.chmod(after_exe, os.stat(after_exe)[stat.ST_MODE] | stat.S_IXUSR)

test.run(program=after_exe, stdout="Foo\n")

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
import sys, time
from django.conf import settings
from django.db import connection, transaction, backend
from django.core import management
from django.dispatch import dispatcher
from django.test import signals
from django.template import Template
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
def instrumented_test_render(self, context):
    """An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Monkeypatched onto Template by setup_test_environment(); fires the
    # template_rendered signal, then renders as the original method would.
    dispatcher.send(signal=signals.template_rendered, sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
        - Installing the instrumented test renderer
    """
    # Save the real render method on the class so teardown can restore it.
    Template.original_render = Template.render
    Template.render = instrumented_test_render
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
        - Restoring the original test renderer
    """
    # Undo setup_test_environment(): put the saved method back and drop
    # the temporary attribute.
    Template.render = Template.original_render
    del Template.original_render
def _set_autocommit(connection):
"Make sure a connection is in autocommit mode."
if hasattr(connection.connection, "autocommit"):
connection.connection.autocommit(True)
elif hasattr(connection.connection, "set_isolation_level"):
connection.connection.set_isolation_level(0)
def create_test_db(verbosity=1, autoclobber=False):
    """Create (or recreate) the test database, switch the global
    connection settings to it, and run syncdb against it.

    NOTE(review): Python 2 code (`print` statement, `except E, e`,
    raw_input); keep it that way while the rest of the module is.
    """
    if verbosity >= 1:
        print "Creating test database..."
    # If we're using SQLite, it's more convenient to test against an
    # in-memory database.
    if settings.DATABASE_ENGINE == "sqlite3":
        TEST_DATABASE_NAME = ":memory:"
    else:
        if settings.TEST_DATABASE_NAME:
            TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
        else:
            TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME

    # Create the test database and connect to it. We need to autocommit
    # if the database supports it because PostgreSQL doesn't allow
    # CREATE/DROP DATABASE statements within transactions.
    cursor = connection.cursor()
    _set_autocommit(connection)
    try:
        cursor.execute("CREATE DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
    except Exception, e:
        sys.stderr.write("Got an error creating the test database: %s\n" % e)
        # A database of that name already exists (or creation failed);
        # confirm interactively unless autoclobber was requested.
        if not autoclobber:
            confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
        if autoclobber or confirm == 'yes':
            try:
                if verbosity >= 1:
                    print "Destroying old test database..."
                cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
                if verbosity >= 1:
                    print "Creating test database..."
                cursor.execute("CREATE DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
            except Exception, e:
                sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                sys.exit(2)
        else:
            print "Tests cancelled."
            sys.exit(1)
    # Point the process at the freshly created database.
    connection.close()
    settings.DATABASE_NAME = TEST_DATABASE_NAME
    management.syncdb(verbosity, interactive=False)

    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database.
    cursor = connection.cursor()
def destroy_test_db(old_database_name, verbosity=1):
    """Drop the test database and restore the original database name.

    NOTE(review): Python 2 code; keep syntax consistent with the module.
    """
    # Unless we're using SQLite, remove the test database to clean up after
    # ourselves. Connect to the previous database (not the test database)
    # to do so, because it's not allowed to delete a database while being
    # connected to it.
    if verbosity >= 1:
        print "Destroying test database..."
    connection.close()
    TEST_DATABASE_NAME = settings.DATABASE_NAME
    settings.DATABASE_NAME = old_database_name
    if settings.DATABASE_ENGINE != "sqlite3":
        cursor = connection.cursor()
        _set_autocommit(connection)
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
        connection.close()
#ifndef _OPENCV_IMAGESTORAGE_H_
#define _OPENCV_IMAGESTORAGE_H_

// Pairs a positive-sample (.vec) reader with a negative-image sampler for
// cascade training.  NOTE(review): std::string / cv::Mat / FILE come from
// headers pulled in by the including translation unit -- confirm.
class CvCascadeImageReader
{
public:
    // Open the positive .vec file and the negative image list file;
    // _winSize is the sampling window used for negatives.
    bool create( const std::string _posFilename, const std::string _negFilename, cv::Size _winSize );
    void restart() { posReader.restart(); }
    bool getNeg(cv::Mat &_img) { return negReader.get( _img ); }
    bool getPos(cv::Mat &_img) { return posReader.get( _img ); }

private:
    // Sequential reader of preprocessed positive samples from a .vec file.
    class PosReader
    {
    public:
        PosReader();
        virtual ~PosReader();
        bool create( const std::string _filename );
        bool get( cv::Mat &_img );
        void restart();

        short* vec;   // decode buffer for one sample
        FILE* file;   // open .vec file handle
        int count;
        int vecSize;
        int last;
        int base;
    } posReader;

    // Samples sub-windows from a list of negative background images.
    class NegReader
    {
    public:
        NegReader();
        bool create( const std::string _filename, cv::Size _winSize );
        bool get( cv::Mat& _img );
        bool nextImg();

        cv::Mat src, img;
        std::vector<std::string> imgFilenames;
        cv::Point offset, point;
        float scale;
        float scaleFactor;
        float stepFactor;
        size_t last, round;
        cv::Size winSize;
    } negReader;
};

#endif
#!/usr/bin/python
# coding=UTF-8
#
# BitCurator NLP (Disk Image Access for the Web)
# Copyright (C) 2014 - 2016
# All rights reserved.
#
# This code is distributed under the terms of the GNU General Public
# License, Version 3. See the text file "COPYING" for further details
# about the terms of this license.
#
# This file contains the main BitCurator NLP application for Topic modeling
# Usage: python bcnlp_tm.py [--topics <10>] [--tm <gensim|graphlab>]
# Default num_topics = 10, tm=graphlab
import os
import logging
import pyLDAvis
import pyLDAvis.gensim
import pyLDAvis.graphlab
import graphlab as gl
from gensim import corpora, models, similarities
import gensim
import textract
from bn_filextract import *
from configobj import ConfigObj
from stop_words import get_stop_words
try:
from argparse import ArgumentParser
except ImportError:
raise ImportError("This script requires ArgumentParser which is in Python 2.7 or Python 3.0")
# NOTE: logging.basicConfig() only configures the root logger on its first
# effective call; the previous code invoked it three times with different
# filenames, so only the INFO configuration below ever took effect and the
# debug/warning log files were never created.  Keep the single effective
# configuration and drop the dead calls.
#logging.basicConfig(filename= 'bcnlp_tm.log', level=logging.DEBUG)
logging.basicConfig(filename='bcnlp_tm_info.log', level=logging.INFO)

# Per-disk-image configuration dictionary, populated elsewhere.
cfg_image = {}
#documents = []
class BnTopicModel():
def tm_generate_gensim(self, infile, num_topics, config_file):
    ''' Using the APIs provided by gensim, LDAvis gui is invoked.
        NOTE: This is not yet tested well.

        NOTE(review): assumes a module-level `bn` (BnFilextract helper)
        exists -- it is not created in this excerpt; confirm.
    '''
    documents = []
    documents = bn.bnTraverseInfileDir(infile, documents, config_file)
    if documents == []:
        print("Documents are empty")
    # remove common words and tokenize
    '''
    stoplist = set('a an the of to for s from is and this \
        was were are , - | @ . '.split())
    texts = [[word for word in document.lower().split() \
                if word not in stoplist] \
                for document in documents]
    '''
    en_stop = get_stop_words('en')
    logging.info("Stop-words list: %s ", en_stop)
    texts = [[word for word in document.lower().split() \
                if word not in en_stop] \
                for document in documents]

    # remove words that appear only once
    from collections import defaultdict
    frequency = defaultdict(int)
    for text in texts:
        for token in text:
            frequency[token] += 1
    texts = [[token for token in text if frequency[token] > 1]
             for text in texts]
    # also drop very short tokens (<= 2 characters)
    texts = [[token for token in text if len(token) > 2]
             for text in texts]

    # NOTE: lemmatize not working
    ###texts = gensim.utils.lemmatize(texts)
    dictionary = corpora.Dictionary(texts)
    ##logging.info("[V]: token:id: %s", dictionary.token2id)
    ## dictionary.compactify()
    dictionary.save('/tmp/saved_dict.dict')

    # Now convert tokenized documents to vectors:
    corpus = [dictionary.doc2bow(text) for text in texts]
    ## logging.info("[V] Corpus: %s ", corpus)
    # store to disk, for later use
    corpora.MmCorpus.serialize('/tmp/saved_dict.mm', corpus)

    ## Creating Transformations
    ## The transformations are standard Python objects, typically
    ## initialized (trained) by means of a training corpus:
    ## First, let's use tfidf for training: It just involves simply
    ## going thru the supplied corpus once and computing document
    ## frequencies of all its features.
    tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
    corpus_tfidf = tfidf[corpus]
    corpora.MmCorpus.serialize('/tmp/saved_corpus_tfidf.mm', corpus_tfidf)
    '''
    # LSI model is commented out for now
    print "Printing TFIDF of given corpus \n"
    for doc in corpus_tfidf:
        print (doc)
    # Now Initialize an LSI transformation: num_topics set to 2 to make
    # it 2D lsi = models.LsiModel(corpus_tfidf, id2word=dictionary,
    # num_topics=3)
    # create a double wrapper over the original corpus:
    # bow->tfidf->fold-in-lsi
    corpus_lsi = lsi[corpus_tfidf]
    print "Printing LSI topics"
    lsi.print_topics(4)
    for doc in corpus_lsi:
        print (doc)
    '''
    # Create an LDA model
    '''
    lda_model = models.LdaModel(corpus_tfidf, \
                                id2word=dictionary, \
                                num_topics=5)
    '''
    lda_model = models.ldamodel.LdaModel(corpus=corpus, \
                                         id2word=dictionary, \
                                         num_topics=num_topics)
    corpus_lda = lda_model[corpus]
    corpus_lda_tfidf = lda_model[corpus_tfidf]

    # The following will print the topics in the logfile
    logging.info("Printing %s topics into log file: ", str(num_topics))
    lda_model.print_topics(num_topics)

    # Generate data for the pyLDAvis interface from the lda_model above
    vis_data = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary)
    ##vis_data = pyLDAvis.gensim.prepare(lda_model, corpus_lda, dictionary)
    #pyLDAvis.display(vis_data)
    pyLDAvis.show(vis_data)
def tm_generate_graphlab(self, indir, num_topics, config_file):
    ''' Generate the LDA model for documents in indir, using graphlab,
        then launch the pyLDAvis browser UI.

        NOTE(review): indir_path below is computed but never used, and
        `bn` is assumed to be a module-level helper created elsewhere.
    '''
    indir_path = os.path.join(os.getcwd(), indir)
    print(">> Graphlab: Creating SArray for files in ", indir)
    sa = self.bnGenerateSArray(indir, config_file)
    # Bag-of-words counts with graphlab's standard stopword list removed.
    sa_docs = gl.text_analytics.count_words(sa)
    sa_docs_nsw = sa_docs.dict_trim_by_keys(gl.text_analytics.stopwords(), \
                                            True)
    num_iterations = bn.bnGetConfigInfo(config_file, \
                                        "confset_section", "num_iterations")
    print(">> Graphlab: Creating topic model with {} topics: ".\
          format(num_topics))
    topic_model = gl.topic_model.create(sa_docs_nsw, \
                                        num_topics=int(num_topics), \
                                        num_iterations=int(num_iterations))
    print(">> Graphlab: Preparing data: ")
    vis_data = pyLDAvis.graphlab.prepare(topic_model, sa_docs_nsw)
    print(">> Graphlab: Launching graphics ")
    pyLDAvis.show(vis_data)
def remove_punctuation(self, text):
import string
return text.translate(None, string.punctuation)
def remove_digits(self, text):
import string
return text.translate(None, string.digits)
    def bnGenerateSArray(self, filextract_dir, config_file):
        ''' Traverse through the files in a directory and create sArrays
            and append them into one single sArray.

        For every non-empty, textract-able file the extracted text is
        cleaned (punctuation and digits removed), written back next to the
        original as a .txt file, loaded into a graphlab SArray, and appended
        to the accumulated result.

        :param filextract_dir: root directory to walk for documents
        :param config_file: config file consulted by isFileTextractable
        :return: one gl.SArray holding all extracted documents
        '''
        # Current function name, used only as a logging prefix.
        fname = sys._getframe().f_code.co_name
        num_docs = 0
        sa_g = gl.SArray(dtype = str)
        # Words/tokens treated as stopwords when trimming rare words below.
        sw_list = ['a', 'an', 'the', 'of', 'to', 'for','as', 'from', 'is', \
                   'was', 'were', 'are', ',', '-', '|', '@', '.' ]
        for root, dirs, files in os.walk(filextract_dir):
            path = root.split(os.sep)
            '''
            print "path: ", path, len(path)
            print "dirs: ", dirs
            print "files: ", files
            print((len(path) - 1) * '---', os.path.basename(root))
            '''
            # if no files continue to next level
            if files == []:
                continue
            for filename in files:
                file_path = '/'.join(path) + '/' + filename
                bn = BnFilextract()
                # Skip zero-byte files up front.
                if os.stat(file_path).st_size == 0:
                    logging.info(">>>> File %s is empty. Skip it ", file_path)
                    continue
                if bn.isFileTextractable(filename, config_file):
                    try:
                        input_file_contents = textract.process(file_path)
                        logging.info("Textracted %s ", file_path)
                        if len(input_file_contents) == 0:
                            logging.info(">>>> File %s is empty. Skip it ", file_path)
                            continue
                    # NOTE(review): bare except deliberately treats any
                    # textract failure as "skip this file".
                    except:
                        logging.info("Textract failed for file %s ", filename)
                        continue
                    input_file_contents = self.remove_punctuation(input_file_contents)
                    input_file_contents = self.remove_digits(input_file_contents)
                    # The cleaned text is persisted as a sibling .txt file.
                    file_path = os.path.splitext(file_path)[0]+'.txt'
                    logging.info("%s: writing contents to outfile:%s ",
                                 fname, file_path)
                else:
                    logging.info("File %s is NOT textractable ",filename)
                    continue
                with open(file_path, "w") as text_file:
                    text_file.write(input_file_contents)
                logging.info(">>> Getting SArray for file %s ", file_path)
                sa_sub = gl.SArray(file_path)
                # NOTE(review): trim_rare_words' return value is discarded;
                # if it is not an in-place operation this call has no
                # effect -- verify against the graphlab API.
                gl.text_analytics.trim_rare_words(sa_sub, \
                                   threshold=2, stopwords=sw_list )
                # Now append the sub-sarray to the main one.
                if num_docs == 0:
                    sa_g = sa_sub
                else:
                    sa_g = sa_g.append(sa_sub)
                num_docs += 1
        logging.info("%s: Total num docs: %d ", fname, num_docs)
        return sa_g
def bnRemoveEmptyFiles(self, path):
''' Traverses the directory and recursively removes empty files.
'''
files = os.listdir(path)
if len(files):
for fl in files:
fullpath = os.path.join(path, fl)
if os.path.isdir(fullpath):
self.bnRemoveEmptyFiles(fullpath)
if os.stat(fullpath).st_size == 0:
logging.info("Removing file %s ", fullpath)
os.remove(fullpath)
def bn_parse_config_file(config_file, section_name):
    ''' Parses the config file to extract the image names and entity list.

    :param config_file: path to the ConfigObj-style configuration file
    :param section_name: section to read ("image_section" or
        "entity_list_section")
    :return: for "entity_list_section", the list of keys whose value is 1;
        otherwise None (image names are stored into the module-level
        ``cfg_image`` mapping as a side effect).
    '''
    logging.info("bn_parse_config_file: Section: %s ", section_name)
    config = ConfigObj(config_file)
    section = config[section_name]
    i = 0
    cfg_entity_list = []
    for key in section:
        if section_name == "image_section":
            logging.info("parse_config: key: %s, section: %s", \
                          key, section[key])
            # NOTE(review): cfg_image must be a pre-initialized module-level
            # dict -- __main__ reads it after this call. TODO confirm it is
            # defined in the part of the module not shown here.
            cfg_image[i] = key
            i += 1
        elif section_name == "entity_list_section":
            # Bug fix: the section object is bound to ``section`` above; the
            # old code referenced an undefined ``entity_list_section`` name,
            # which raised NameError for this section.
            flag = int(section[key])
            if flag == 1:
                cfg_entity_list.append(key)
    if section_name == "entity_list_section":
        return cfg_entity_list
if __name__ == "__main__":
    # Command-line entry point: extract files (from disk images or a plain
    # directory) and run topic modeling with gensim or graphlab.
    parser = ArgumentParser(prog='bcnlp_tm.py', description='Topic modeling')
    parser.add_argument('--config', action='store', \
                help="Config file[bntm_config.txt] ")
    parser.add_argument('--infile', action='store', help="input directory ")
    parser.add_argument('--tm', action='store', \
                help="topic modeling :gensim/graphlab ")
    parser.add_argument('--topics', action='store', help="number of topics ")
    args = parser.parse_args()
    # Infile specifies the directory of files to run the topic modeling on.
    # If no argument specified, it will assume there are disk-images specified
    # in the config file bntm_config.txt.
    infile = args.infile
    tm = args.tm # Topic modeling type: gensim/graphlab
    config_file = args.config
    is_disk_image = False
    num_topics = 10
    if args.topics:
        # NOTE(review): args.topics stays a string here; the consumers cast
        # it with int() where needed.
        num_topics = args.topics
    # default it to Graphlab
    if tm == None:
        tm = 'graphlab'
    if config_file == None:
        config_file = "bntm_config.txt"
    bn = BnFilextract()
    if infile == None:
        # Disk-image mode: image names come from the config file and files
        # are extracted into the staging directory.
        is_disk_image = True
        bn_parse_config_file(config_file, "image_section")
        print(">> Images in the config file: ", cfg_image)
        infile = bn.bnGetConfigInfo(config_file, \
                     "confset_section", "file_staging_directory")
        i = 0
        for img in cfg_image:
            print(">> Extracting files from image {}...".format(cfg_image[img]))
            bn.bnExtractFiles(None, cfg_image[img], i, None, config_file)
            i += 1
        print(">> ... Done ")
    else:
        print(">> Extracting files from ", infile)
        # NOTE(review): ``documents`` is not defined anywhere in this
        # module chunk -- this branch looks like it would raise NameError.
        # Verify against the full module before relying on it.
        bn.bnTraverseInfileDir(infile, documents, config_file)
    tmc = BnTopicModel()
    if tm == 'gensim':
        tmc.tm_generate_gensim(infile, num_topics, config_file)
    elif tm == 'graphlab':
        if is_disk_image:
            indir = bn.bnGetOutDirFromConfig(config_file)
            print(">> Generating graphlab for images in disk image")
            logging.info(">> Generating graphlab for images in disk image")
            logging.info("File-extracted directory: %s ", indir)
            tmc.tm_generate_graphlab(indir, num_topics, config_file)
        else:
            print(">> Generating graphlab for files in ", infile)
            logging.info(">> Generating graphlab for files in %s", infile)
            tmc.tm_generate_graphlab(infile, num_topics, config_file)
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

# GitHub Actions workflow: runs the OSSF Scorecard supply-chain checks and
# uploads the SARIF results to the repository's code-scanning dashboard.
name: Scorecards supply-chain security
on:
  # For Branch-Protection check. Only the default branch is supported. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
  branch_protection_rule:
  # To guarantee Maintained check is occasionally updated. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
  schedule:
    - cron: '26 3 * * 2'
  push:
    branches: [ "master" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  # Single job: run the Scorecard checks, then publish the results.
  analysis:
    if: github.repository == 'tensorflow/tensorflow' # Don't do this in forks
    name: Scorecards analysis
    runs-on: ubuntu-latest
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Needed to publish results and get a badge (see publish_results below).
      id-token: write

    steps:
      - name: "Checkout code"
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
        with:
          results_file: results.sarif
          results_format: sarif
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard (optional).
      # Commenting out will disable upload of results to your repo's Code Scanning dashboard
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v3.29.5
        with:
          sarif_file: results.sarif
import os
def parse_distributions_h(ffi, inc_dir):
    """
    Feed the declarations from numpy's random headers into ``ffi.cdef``.

    Reads ``random/bitgen.h`` and ``random/distributions.h`` under
    *inc_dir*, dropping preprocessor directives, ``extern "C"`` guards and
    inlined function bodies, and expanding the ``DECLDIR`` /
    ``RAND_INT_TYPE`` macros that cdef cannot process.
    """
    bitgen_path = os.path.join(inc_dir, 'random', 'bitgen.h')
    with open(bitgen_path) as header:
        # Everything except preprocessor directives goes straight to cdef.
        kept = [ln for ln in header if not ln.strip().startswith('#')]
    ffi.cdef('\n'.join(kept))

    dist_path = os.path.join(inc_dir, 'random', 'distributions.h')
    with open(dist_path) as header:
        kept = []
        brace_depth = 0       # >0 while inside a 'static inline' body
        in_extern_c = False   # True inside an '#ifdef __cplusplus' guard
        for ln in header:
            stripped = ln.strip()
            # Drop the '#ifdef __cplusplus ... #endif' extern "C" guards.
            if in_extern_c:
                if stripped.startswith('#endif'):
                    in_extern_c = False
                continue
            if stripped.startswith('#ifdef __cplusplus'):
                in_extern_c = True
            # Drop any remaining preprocessor directive (incl. the guard
            # opener flagged just above).
            if stripped.startswith('#'):
                continue
            # Skip inlined definitions: 'static inline xxx(...) {' up to
            # the matching closing brace.
            if stripped.startswith('static inline'):
                brace_depth += ln.count('{')
                continue
            if brace_depth > 0:
                brace_depth += ln.count('{') - ln.count('}')
                continue
            # Expand the two macros to plain C before handing to cdef.
            kept.append(
                ln.replace('DECLDIR', '').replace('RAND_INT_TYPE', 'int64_t'))
    ffi.cdef('\n'.join(kept))
from builtins import str
from builtins import object
import logging
import copy
from elasticsearch import Elasticsearch
class BmajIndex(object):
    '''
    ElasticSearch indexation and search
    '''
    '''
    ElasticSearch server
    '''
    # Shared Elasticsearch client, created lazily by load().
    es = None
    '''
    Index name
    '''
    index = 'biomaj'
    '''
    Do indexing
    '''
    # Global switch: when False every method is a no-op.
    do_index = False
    @staticmethod
    def load(hosts=None, index='biomaj', do_index=True):
        '''
        Initialize index
        :param hosts: List of elastic search nodes to connect to
        :type hosts: list
        :param do_index: index data or not
        :type do_index: bool
        '''
        if hosts is None:
            hosts=['localhost']
        if not do_index:
            return
        BmajIndex.index = index
        BmajIndex.do_index = do_index
        # Only create the client and the index mapping once.
        if BmajIndex.es is None:
            BmajIndex.es = Elasticsearch(hosts)
            # Disable date detection so release strings such as "2016-01"
            # are not coerced into date fields.
            mapping = {
                "mappings": {
                    "production": {
                        "date_detection": False
                    },
                    "releasestats": {
                        "date_detection": False,
                        "_timestamp" : {
                            "enabled" : True,
                            "store" : True
                        }
                    }
                }
            }
            try:
                if not BmajIndex.es.indices.exists(index=BmajIndex.index):
                    BmajIndex.es.indices.create(index=BmajIndex.index,body=mapping)
            except Exception as e:
                logging.error('ElasticSearch connection error, check server is running and configuration')
                raise e
    @staticmethod
    def delete_all_bank(bank_name):
        '''
        Delete complete index for a bank
        '''
        if not BmajIndex.do_index:
            return
        query = {
            "query" : {
                "term" : { "bank" : bank_name }
                }
            }
        BmajIndex.es.delete_by_query(index=BmajIndex.index, body=query)
    @staticmethod
    def remove(bank_name, release):
        '''
        Remove a production release
        :param bank_name: Name of the bank
        :type bank_name: str
        :param release: production release
        :type release: str
        '''
        if not BmajIndex.do_index:
            return
        try:
            # NOTE(review): an Elasticsearch "term" query targets a single
            # field; this two-field form may not filter on both release and
            # bank as intended -- verify against the ES query DSL.
            query = {
                "query" : {
                    "term" : { "release" : release, "bank": bank_name }
                    }
                }
            BmajIndex.es.delete_by_query(index=BmajIndex.index, body=query)
        except Exception as e:
            logging.error('Index:Remove:'+bank_name+'_'+str(release)+':Exception:'+str(e))
    @staticmethod
    def search(query):
        # Raw query-DSL search over the "production" documents.
        if not BmajIndex.do_index:
            return None
        res = BmajIndex.es.search(index=BmajIndex.index, doc_type='production', body=query)
        return res['hits']['hits']
    @staticmethod
    def searchq(query,size=1000):
        '''
        Lucene syntax search
        :param query: Lucene search string
        :type query: str
        :param size: number of results
        :type size: int
        :return: list of matches
        '''
        if not BmajIndex.do_index:
            return None
        res = BmajIndex.es.search(index=BmajIndex.index, doc_type='production', q=query, size=size)
        return res['hits']['hits']
    @staticmethod
    def add_stat(stat_id, stat):
        '''
        Add some statistics, must contain release and bank properties.
        '''
        if not BmajIndex.do_index:
            return
        # Reject incomplete stats instead of indexing partial documents.
        if stat['release'] is None or stat['bank'] is None:
            return False
        #stat['bank'] = bank_name
        BmajIndex.es.index(index=BmajIndex.index, doc_type='releasestats', id=stat_id, body=stat)
        return True
    @staticmethod
    def add(bank_name, prod, flush=False):
        '''
        Index a production release
        :param bank_name: Name of the bank
        :type bank_name: str
        :param prod: session release object
        :type prod: dict
        :param flush: Force flushing
        :type flush: bool
        '''
        if not BmajIndex.do_index:
            return
        # Work on a deep copy so the caller's session object is untouched.
        obj = copy.deepcopy(prod)
        if obj['release'] is None:
            return
        obj['bank'] = bank_name
        formats = obj['formats']
        try:
            # Index one document per file entry, annotated with its format,
            # bank and release.
            for fkey,fvalue in formats.items():
                for elt in fvalue:
                    elt['format'] = fkey
                    elt['bank'] = bank_name
                    elt['release'] = obj['release']
                    if 'status' in obj:
                        elt['status'] = obj['status']
                    # NOTE(review): the index() response is captured but
                    # never used.
                    res = BmajIndex.es.index(index=BmajIndex.index, doc_type='production', body=elt)
            if flush:
                BmajIndex.es.indices.flush(index=BmajIndex.index, force=True)
        except Exception as e:
            logging.error('Index:Add:'+bank_name+'_'+str(obj['release'])+':Exception:'+str(e))
from optparse import OptionParser
import sys
import graph
def init_parser():
    """Declare the command-line interface and return the parsed options.

    Recognized flags:
      -f/--file    input file of parent:children pairs
      -o/--output  output image name
    """
    cli = OptionParser()
    for short_flag, long_flag, dest in (("-f", "--file", "File"),
                                        ("-o", "--output", "Output")):
        cli.add_option(short_flag, long_flag, dest=dest, type="string", help="")
    parsed, _unused_args = cli.parse_args()
    return parsed
def main():
    # Entry point: read "parent:child1,child2" lines from the input file,
    # build an adjacency mapping and render it with the project's graph
    # module.  (Python 2 script: note the bare print statements.)
    options = init_parser()
    if options.File == None:
        print "Error: Did not specify input file."
        sys.exit(1)
    if options.Output == None:
        print "Warning: Did not specify output file. Defaulting to 'image.pdf'"
        options.Output = "image"
    pairs = []
    with open(options.File, 'r') as input_file:
        for line in input_file:
            pairs.append(line.strip())
    # Build parent -> [children] adjacency from each "parent:a,b,c" line.
    mapping = {}
    for p in pairs:
        (parent, children) = p.split(":")
        if parent not in mapping:
            mapping[parent] = []
        mapping[parent].extend(children.split(","))
    # graph.Graph / render come from the local ``graph`` module.
    image = graph.Graph(mapping)
    image.render(options.Output)
if __name__ == "__main__":
    main()
"""Calendar module for Zinnia"""
from __future__ import absolute_import
from datetime import date
from calendar import HTMLCalendar
from django.utils.dates import MONTHS
from django.utils.dates import WEEKDAYS_ABBR
from django.utils.formats import get_format
from django.utils.formats import date_format
from django.core.urlresolvers import reverse
from zinnia.models.entry import Entry
# Lookup table translating Django's localized FIRST_DAY_OF_WEEK value into
# the stdlib ``calendar`` module's weekday numbering; Calendar.__init__
# below indexes this list with get_format('FIRST_DAY_OF_WEEK').
AMERICAN_TO_EUROPEAN_WEEK_DAYS = [6, 0, 1, 2, 3, 4, 5]
class Calendar(HTMLCalendar):
    """
    Extension of the HTMLCalendar.

    Renders a month of published Zinnia entries as an HTML table whose day
    cells link to the per-day archive views.
    """
    def __init__(self):
        """
        Retrieve and convert the localized first week day
        at initialization.
        """
        # The localized FIRST_DAY_OF_WEEK format value is mapped through the
        # module-level lookup table into the calendar module's numbering.
        HTMLCalendar.__init__(self, AMERICAN_TO_EUROPEAN_WEEK_DAYS[
            get_format('FIRST_DAY_OF_WEEK')])
    def formatday(self, day, weekday):
        """
        Return a day as a table cell with a link
        if entries are published this day.
        """
        # Relies on current_year / current_month / day_entries set by
        # formatmonth() before the per-day callbacks run.
        if day and day in self.day_entries:
            day_date = date(self.current_year, self.current_month, day)
            archive_day_url = reverse('zinnia:entry_archive_day',
                                      args=[day_date.strftime('%Y'),
                                            day_date.strftime('%m'),
                                            day_date.strftime('%d')])
            return '<td class="%s entry"><a href="%s" '\
                   'class="archives">%d</a></td>' % (
                       self.cssclasses[weekday], archive_day_url, day)
        return super(Calendar, self).formatday(day, weekday)
    def formatweekday(self, day):
        """
        Return a weekday name translated as a table header.
        """
        return '<th class="%s">%s</th>' % (self.cssclasses[day],
                                           WEEKDAYS_ABBR[day].title())
    def formatweekheader(self):
        """
        Return a header for a week as a table row.
        """
        return '<thead>%s</thead>' % super(Calendar, self).formatweekheader()
    def formatfooter(self, previous_month, next_month):
        """
        Return a footer for a previous and next month.

        Either argument may be None, in which case a non-breaking space is
        rendered instead of the archive link.
        """
        footer = '<tfoot><tr>' \
                 '<td colspan="3" class="prev">%s</td>' \
                 '<td class="pad">&nbsp;</td>' \
                 '<td colspan="3" class="next">%s</td>' \
                 '</tr></tfoot>'
        if previous_month:
            previous_content = '<a href="%s" class="previous-month">%s</a>' % (
                reverse('zinnia:entry_archive_month', args=[
                    previous_month.strftime('%Y'),
                    previous_month.strftime('%m')]),
                date_format(previous_month, 'YEAR_MONTH_FORMAT'))
        else:
            previous_content = '&nbsp;'
        if next_month:
            next_content = '<a href="%s" class="next-month">%s</a>' % (
                reverse('zinnia:entry_archive_month', args=[
                    next_month.strftime('%Y'),
                    next_month.strftime('%m')]),
                date_format(next_month, 'YEAR_MONTH_FORMAT'))
        else:
            next_content = '&nbsp;'
        return footer % (previous_content, next_content)
    def formatmonthname(self, theyear, themonth, withyear=True):
        """Return a month name translated as a table row."""
        monthname = '%s %s' % (MONTHS[themonth].title(), theyear)
        return '<caption>%s</caption>' % monthname
    def formatmonth(self, theyear, themonth, withyear=True,
                    previous_month=None, next_month=None):
        """
        Return a formatted month as a table
        with new attributes computed for formatting a day,
        and thead/tfooter.
        """
        # Stash the month being rendered plus the days that have published
        # entries; formatday() reads these back.
        self.current_year = theyear
        self.current_month = themonth
        self.day_entries = [date.day
                            for date in Entry.published.filter(
                                publication_date__year=theyear,
                                publication_date__month=themonth
                            ).datetimes('publication_date', 'day')]
        v = []
        a = v.append
        a('<table class="%s">' % (
            self.day_entries and 'entries-calendar' or 'no-entries-calendar'))
        a('\n')
        a(self.formatmonthname(theyear, themonth, withyear=withyear))
        a('\n')
        a(self.formatweekheader())
        a('\n')
        a(self.formatfooter(previous_month, next_month))
        a('\n<tbody>\n')
        for week in self.monthdays2calendar(theyear, themonth):
            a(self.formatweek(week))
            a('\n')
        a('</tbody>\n</table>')
        a('\n')
        return ''.join(v)
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
# Devicetree binding schema (dt-schema) for the Loongson-1 APB DMA
# controller; validated against device trees by the kernel's dtbs_check.
$id: http://devicetree.org/schemas/dma/loongson,ls1b-apbdma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Loongson-1 APB DMA Controller
maintainers:
  - Keguang Zhang <keguang.zhang@gmail.com>
description:
  Loongson-1 APB DMA controller provides 3 independent channels for
  peripherals such as NAND, audio playback and capture.
properties:
  compatible:
    oneOf:
      - const: loongson,ls1b-apbdma
      - items:
          - enum:
              - loongson,ls1a-apbdma
              - loongson,ls1c-apbdma
          - const: loongson,ls1b-apbdma
  reg:
    maxItems: 1
  interrupts:
    items:
      - description: NAND interrupt
      - description: Audio playback interrupt
      - description: Audio capture interrupt
  interrupt-names:
    items:
      - const: ch0
      - const: ch1
      - const: ch2
  '#dma-cells':
    const: 1
required:
  - compatible
  - reg
  - interrupts
  - interrupt-names
  - '#dma-cells'
additionalProperties: false
# Worked example checked by the schema tooling.
examples:
  - |
    #include <dt-bindings/interrupt-controller/irq.h>
    dma-controller@1fd01160 {
        compatible = "loongson,ls1b-apbdma";
        reg = <0x1fd01160 0x4>;
        interrupt-parent = <&intc0>;
        interrupts = <13 IRQ_TYPE_EDGE_RISING>,
                     <14 IRQ_TYPE_EDGE_RISING>,
                     <15 IRQ_TYPE_EDGE_RISING>;
        interrupt-names = "ch0", "ch1", "ch2";
        #dma-cells = <1>;
    };
/* __next_internal_client_entry_do_not_use__ default auto */ export default function App() {
    // NOTE(review): this file looks like Next.js compiler test-fixture
    // output -- the leading directive comment is emitted by the transform
    // and presumably must stay byte-exact for the snapshot; verify before
    // changing anything here.
    async function fn() {
        'use server';
    }
    return <div>App</div>;
}
"""
Module simplifying manipulation of XML described at
http://libvirt.org/formatstorage.html#StorageVol
"""
from virttest.libvirt_xml import base, accessors
from virttest.libvirt_xml.xcepts import LibvirtXMLNotFoundError
class VolXMLBase(base.LibvirtXMLBase):

    """
    Accessor methods for VolXML class.

    Properties:
        name: string, operates on XML name tag
        key: string, operates on key tag
        capacity: integer, operates on capacity attribute of capacity tag
        allocation: integer, operates on allocation attribute of allocation
        format: string, operates on type attribute of format tag
        path: string, operates on path attribute of path tag
        owner, integer, operates on owner attribute of owner tag
        group, integer, operates on group attribute of group tag
        mode: string, operates on mode attribute of mode tag
        label: string, operates on label attribute of label tag
        compat: string, operates on compat attribute of label tag
        lazy_refcounts: bool, True/False
        encryption: VolXMLBase.Encryption instance.
        capacity_unit: string, operates on unit attribute of capacity tag
    """

    __slots__ = ('name', 'key', 'capacity', 'allocation', 'format', 'path',
                 'owner', 'group', 'mode', 'label', 'compat', 'lazy_refcounts',
                 'encryption', "capacity_unit")
    __uncompareable__ = base.LibvirtXMLBase.__uncompareable__

    __schema_name__ = "storagevol"

    def __init__(self, virsh_instance=base.virsh):
        # Each accessor registers a property that reads/writes the matching
        # element or attribute of the storage-volume XML document.
        accessors.XMLElementText('name', self, parent_xpath='/',
                                 tag_name='name')
        accessors.XMLElementText('key', self, parent_xpath='/',
                                 tag_name='key')
        accessors.XMLElementInt('capacity', self, parent_xpath='/',
                                tag_name='capacity')
        accessors.XMLElementInt('allocation', self, parent_xpath='/',
                                tag_name='allocation')
        accessors.XMLAttribute('format', self, parent_xpath='/target',
                               tag_name='format', attribute='type')
        accessors.XMLAttribute('capacity_unit', self, parent_xpath='/',
                               tag_name='capacity', attribute='unit')
        # 'encryption' maps to a nested element handled by the Encryption
        # helper class defined on VolXML.
        accessors.XMLElementNest('encryption', self, parent_xpath='/target',
                                 tag_name='encryption', subclass=self.Encryption,
                                 subclass_dargs={
                                     'virsh_instance': virsh_instance})
        accessors.XMLElementText('path', self, parent_xpath='/target',
                                 tag_name='path')
        accessors.XMLElementInt('owner', self,
                                parent_xpath='/target/permissions',
                                tag_name='owner')
        accessors.XMLElementInt('group', self,
                                parent_xpath='/target/permissions',
                                tag_name='group')
        accessors.XMLElementText('mode', self,
                                 parent_xpath='/target/permissions',
                                 tag_name='mode')
        accessors.XMLElementText('label', self,
                                 parent_xpath='/target/permissions',
                                 tag_name='label')
        accessors.XMLElementText('compat', self, parent_xpath='/target',
                                 tag_name='compat')
        accessors.XMLElementBool('lazy_refcounts', self,
                                 parent_xpath='/target/features',
                                 tag_name='lazy_refcounts')
        super(VolXMLBase, self).__init__(virsh_instance=virsh_instance)
class VolXML(VolXMLBase):

    """
    Manipulators of a Virtual Vol through it's XML definition.
    """

    __slots__ = []

    def __init__(self, vol_name='default', virsh_instance=base.virsh):
        """
        Initialize new instance with empty XML
        """
        super(VolXML, self).__init__(virsh_instance=virsh_instance)
        self.xml = u"<volume><name>%s</name></volume>" % vol_name

    def new_encryption(self, **dargs):
        """
        Return a new volume encryption instance and set properties from dargs
        """
        new_one = self.Encryption(virsh_instance=self.virsh)
        for key, value in dargs.items():
            setattr(new_one, key, value)
        return new_one

    def create(self, pool_name, virsh_instance=base.virsh):
        """
        Create volume with virsh from this instance

        :return: True on success, False if virsh vol-create failed
        """
        result = virsh_instance.vol_create(pool_name, self.xml)
        if result.exit_status:
            return False
        return True

    @staticmethod
    def new_from_vol_dumpxml(vol_name, pool_name, virsh_instance=base.virsh):
        """
        Return new VolXML instance from virsh vol-dumpxml command

        :param vol_name: Name of vol to vol-dumpxml
        :param virsh_instance: virsh module or instance to use
        :return: New initialized VolXML instance
        """
        volxml = VolXML(virsh_instance=virsh_instance)
        volxml['xml'] = virsh_instance.vol_dumpxml(vol_name, pool_name)\
                                      .stdout.strip()
        return volxml

    @staticmethod
    def get_vol_details_by_name(vol_name, pool_name, virsh_instance=base.virsh):
        """
        Return volume xml dictionary by Vol's uuid or name.

        :param vol_name: Vol's name
        :return: volume xml dictionary with keys key/path/capacity/
                 allocation/format ('format' is None when the element is
                 absent from the volume XML)
        """
        volume_xml = {}
        vol_xml = VolXML.new_from_vol_dumpxml(vol_name, pool_name,
                                              virsh_instance)
        volume_xml['key'] = vol_xml.key
        volume_xml['path'] = vol_xml.path
        volume_xml['capacity'] = vol_xml.capacity
        volume_xml['allocation'] = vol_xml.allocation
        try:
            volume_xml['format'] = vol_xml.format
        except LibvirtXMLNotFoundError:
            volume_xml['format'] = None
        return volume_xml

    @staticmethod
    def new_vol(**dargs):
        """
        Return a new VolXML instance and set properties from dargs

        :param dargs: param dictionary
        :return: new VolXML instance
        """
        new_one = VolXML(virsh_instance=base.virsh)
        for key, value in dargs.items():
            setattr(new_one, key, value)
        return new_one

    class Encryption(base.LibvirtXMLBase):

        """
        Encryption volume XML class

        Properties:

        format:
            string.
        secret:
            dict, keys: type, uuid
        """

        __slots__ = ('format', 'secret')

        def __init__(self, virsh_instance=base.virsh):
            accessors.XMLAttribute('format', self, parent_xpath='/',
                                   tag_name='encryption', attribute='format')
            accessors.XMLElementDict('secret', self, parent_xpath='/',
                                     tag_name='secret')
            super(VolXML.Encryption, self).__init__(
                virsh_instance=virsh_instance)
            self.xml = '<encryption/>'
# frozen_string_literal: true
require "cases/helper"
require "models/auto_id"
require "models/aircraft"
require "models/dashboard"
require "models/clothing_item"
require "models/post"
require "models/comment"
require "models/author"
require "models/topic"
require "models/reply"
require "models/category"
require "models/company"
require "models/developer"
require "models/computer"
require "models/project"
require "models/minimalistic"
require "models/parrot"
require "models/minivan"
require "models/car"
require "models/person"
require "models/ship"
require "models/admin"
require "models/admin/user"
require "models/cpk"
require "models/chat_message"
require "models/default"
require "models/post_with_prefetched_pk"
require "models/pk_autopopulated_by_a_trigger_record"
class PersistenceTest < ActiveRecord::TestCase
  fixtures :topics, :companies, :developers, :accounts, :minimalistics, :authors, :author_addresses,
    :posts, :minivans, :clothing_items, :cpk_books, :people, :cars

  # Auto-populated column behaviour: attributes filled in by the database
  # (auto-increment, triggers) must be read back after create.
  def test_populates_non_primary_key_autoincremented_column
    topic = TitlePrimaryKeyTopic.create!(title: "title pk topic")
    assert_not_nil topic.attributes["id"]
  end

  def test_populates_autoincremented_id_pk_regardless_of_its_position_in_columns_list
    auto_populated_column_names = AutoId.columns.select(&:auto_populated?).map(&:name)

    # It's important we test a scenario where tables has more than one auto populated column
    # and the first column is not the primary key. Otherwise it will be a regular test not asserting this special case.
    assert auto_populated_column_names.size > 1
    assert_not_equal AutoId.primary_key, auto_populated_column_names.first

    record = AutoId.create!
    last_id = AutoId.last.id
    assert_not_nil last_id
    assert last_id > 0
    assert_equal last_id, record.id
  end

  # Composite-primary-key model: only part of the key is auto-populated.
  def test_populates_non_primary_key_autoincremented_column_for_a_cpk_model
    order = Cpk::Order.create(shop_id: 111_222)

    _shop_id, order_id = order.id
    assert_not_nil order_id
  end
if current_adapter?(:PostgreSQLAdapter)
def test_fills_auto_populated_columns_on_creation
record = Default.create
assert_not_nil record.id
assert_equal "Ruby on Rails", record.ruby_on_rails
if supports_virtual_columns?
assert_not_nil record.virtual_stored_number
end
assert_not_nil record.random_number
assert_not_nil record.modified_date
assert_not_nil record.modified_date_function
assert_not_nil record.modified_time
assert_not_nil record.modified_time_without_precision
assert_not_nil record.modified_time_function
assert_equal "A", record.binary_default_function
if supports_identity_columns?
klass = Class.new(ActiveRecord::Base) do
self.table_name = "postgresql_identity_table"
end
record = klass.create!
assert_not_nil record.id
end
end
elsif current_adapter?(:SQLite3Adapter)
def test_fills_auto_populated_columns_on_creation
record = Default.create
assert_not_nil record.id
assert_equal "Ruby on Rails", record.ruby_on_rails
assert_not_nil record.random_number
assert_not_nil record.modified_date
assert_not_nil record.modified_date_function
assert_not_nil record.modified_time
assert_not_nil record.modified_time_without_precision
assert_not_nil record.modified_time_function
end
elsif current_adapter?(:Mysql2Adapter, :TrilogyAdapter)
def test_fills_auto_populated_columns_on_creation
record = Default.create
assert_not_nil record.id
assert_not_nil record.char1
if supports_default_expression? && supports_insert_returning?
assert_not_nil record.uuid
end
end
end
def test_update_many
topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" } }
updated = Topic.update(topic_data.keys, topic_data.values)
assert_equal [1, 2], updated.map(&:id)
assert_equal "1 updated", Topic.find(1).content
assert_equal "2 updated", Topic.find(2).content
end
def test_update_many_with_duplicated_ids
updated = Topic.update([1, 1, 2], [
{ "content" => "1 duplicated" }, { "content" => "1 updated" }, { "content" => "2 updated" }
])
assert_equal [1, 1, 2], updated.map(&:id)
assert_equal "1 updated", Topic.find(1).content
assert_equal "2 updated", Topic.find(2).content
end
def test_update_many_with_invalid_id
topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" }, 99999 => {} }
assert_raise(ActiveRecord::RecordNotFound) do
Topic.update(topic_data.keys, topic_data.values)
end
assert_not_equal "1 updated", Topic.find(1).content
assert_not_equal "2 updated", Topic.find(2).content
end
def test_update_many_with_active_record_base_object
error = assert_raises(ArgumentError) do
Topic.update(Topic.first, "content" => "1 updated")
end
assert_equal "You are passing an instance of ActiveRecord::Base to `update`. " \
"Please pass the id of the object by calling `.id`.", error.message
assert_not_equal "1 updated", Topic.first.content
end
def test_update_many_with_array_of_active_record_base_objects
error = assert_raise(ArgumentError) do
Topic.update(Topic.first(2), content: "updated")
end
assert_equal "You are passing an array of ActiveRecord::Base instances to `update`. " \
"Please pass the ids of the objects by calling `pluck(:id)` or `map(&:id)`.", error.message
assert_not_equal "updated", Topic.first.content
assert_not_equal "updated", Topic.second.content
end
def test_class_level_update_without_ids
topics = Topic.all
assert_equal 5, topics.length
topics.each do |topic|
assert_not_equal "updated", topic.content
end
updated = Topic.update(content: "updated")
assert_equal 5, updated.length
updated.each do |topic|
assert_equal "updated", topic.content
end
end
def test_class_level_update_is_affected_by_scoping
topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" } }
assert_raise(ActiveRecord::RecordNotFound) do
Topic.where("1=0").scoping { Topic.update(topic_data.keys, topic_data.values) }
end
assert_not_equal "1 updated", Topic.find(1).content
assert_not_equal "2 updated", Topic.find(2).content
end
def test_returns_object_even_if_validations_failed
assert_equal Developer.all.to_a, Developer.update(salary: 1_000_000)
end
def test_update_many!
topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" } }
updated = Topic.update!(topic_data.keys, topic_data.values)
assert_equal [1, 2], updated.map(&:id)
assert_equal "1 updated", Topic.find(1).content
assert_equal "2 updated", Topic.find(2).content
end
# --- Class-level Topic.update! behavior: duplicate ids, missing ids,
# argument validation, implicit current-scope updates, and scoping. ---

def test_update_many_with_duplicated_ids!
  # When an id repeats, the last attribute hash for it wins; the returned
  # array mirrors the ids passed in, duplicates included.
  updated = Topic.update!([1, 1, 2], [
    { "content" => "1 duplicated" }, { "content" => "1 updated" }, { "content" => "2 updated" }
  ])
  assert_equal [1, 1, 2], updated.map(&:id)
  assert_equal "1 updated", Topic.find(1).content
  assert_equal "2 updated", Topic.find(2).content
end

def test_update_many_with_invalid_id!
  # An unknown id makes update! raise, and neither existing record is changed.
  topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" }, 99999 => {} }
  assert_raise(ActiveRecord::RecordNotFound) do
    Topic.update!(topic_data.keys, topic_data.values)
  end
  assert_not_equal "1 updated", Topic.find(1).content
  assert_not_equal "2 updated", Topic.find(2).content
end

def test_update_many_with_active_record_base_object!
  # Passing a record instead of an id is rejected with an explanatory message.
  error = assert_raises(ArgumentError) do
    Topic.update!(Topic.first, "content" => "1 updated")
  end
  assert_equal "You are passing an instance of ActiveRecord::Base to `update!`. " \
    "Please pass the id of the object by calling `.id`.", error.message
  assert_not_equal "1 updated", Topic.first.content
end

def test_update_many_with_array_of_active_record_base_objects!
  # Same guard for arrays of records; the message suggests pluck(:id)/map(&:id).
  error = assert_raise(ArgumentError) do
    Topic.update!(Topic.first(2), content: "updated")
  end
  assert_equal "You are passing an array of ActiveRecord::Base instances to `update!`. " \
    "Please pass the ids of the objects by calling `pluck(:id)` or `map(&:id)`.", error.message
  assert_not_equal "updated", Topic.first.content
  assert_not_equal "updated", Topic.second.content
end

def test_class_level_update_without_ids!
  # With no ids, update! applies the attributes to every record in scope.
  topics = Topic.all
  assert_equal 5, topics.length
  topics.each do |topic|
    assert_not_equal "updated", topic.content
  end
  updated = Topic.update!(content: "updated")
  assert_equal 5, updated.length
  updated.each do |topic|
    assert_equal "updated", topic.content
  end
end

def test_class_level_update_is_affected_by_scoping!
  # Inside an empty scope the ids cannot be found, so nothing is updated.
  topic_data = { 1 => { "content" => "1 updated" }, 2 => { "content" => "2 updated" } }
  assert_raise(ActiveRecord::RecordNotFound) do
    Topic.where("1=0").scoping { Topic.update!(topic_data.keys, topic_data.values) }
  end
  assert_not_equal "1 updated", Topic.find(1).content
  assert_not_equal "2 updated", Topic.find(2).content
end

def test_raises_error_when_validations_failed
  # update! (unlike update) surfaces validation failures as RecordInvalid.
  assert_raises(ActiveRecord::RecordInvalid) do
    Developer.update!(salary: 1_000_000)
  end
end
# delete_all removes every row in the scope and returns the deleted count.
def test_delete_all
  # Capture the count first so we can compare it to delete_all's return value.
  expected_count = Topic.count
  assert_operator expected_count, :>, 0
  assert_equal expected_count, Topic.delete_all
end
# --- increment / increment! on numeric attributes ---

def test_increment_attribute
  assert_equal 50, accounts(:signals37).credit_limit
  accounts(:signals37).increment! :credit_limit
  assert_equal 51, accounts(:signals37, :reload).credit_limit
  # increment only changes the in-memory value; the chained increment!
  # persists both steps, so the stored value advances by 2.
  accounts(:signals37).increment(:credit_limit).increment!(:credit_limit)
  assert_equal 53, accounts(:signals37, :reload).credit_limit
end

def test_increment_aliased_attribute
  # Same behavior through an attribute alias (available_credit -> credit_limit).
  assert_equal 50, accounts(:signals37).available_credit
  accounts(:signals37).increment!(:available_credit)
  assert_equal 51, accounts(:signals37, :reload).available_credit
  accounts(:signals37).increment(:available_credit).increment!(:available_credit)
  assert_equal 53, accounts(:signals37, :reload).available_credit
end

def test_increment_nil_attribute
  # A nil attribute is treated as zero before incrementing.
  assert_nil topics(:first).parent_id
  topics(:first).increment! :parent_id
  assert_equal 1, topics(:first).parent_id
end

def test_increment_attribute_by
  # The optional second argument is the step size.
  assert_equal 50, accounts(:signals37).credit_limit
  accounts(:signals37).increment! :credit_limit, 5
  assert_equal 55, accounts(:signals37, :reload).credit_limit
  accounts(:signals37).increment(:credit_limit, 1).increment!(:credit_limit, 3)
  assert_equal 59, accounts(:signals37, :reload).credit_limit
end

def test_increment_updates_counter_in_db_using_offset
  # Two copies of the same row each increment! once; both writes take effect
  # even though a2 was loaded before a1's write landed.
  a1 = accounts(:signals37)
  initial_credit = a1.credit_limit
  a2 = Account.find(accounts(:signals37).id)
  a1.increment!(:credit_limit)
  a2.increment!(:credit_limit)
  assert_equal initial_credit + 2, a1.reload.credit_limit
end

def test_increment_with_touch_updates_timestamps
  # touch: true bumps updated_at alongside the counter.
  topic = topics(:first)
  assert_equal 1, topic.replies_count
  previously_updated_at = topic.updated_at
  travel(1.second) do
    topic.increment!(:replies_count, touch: true)
  end
  assert_equal 2, topic.reload.replies_count
  assert_operator previously_updated_at, :<, topic.updated_at
end

def test_increment_with_touch_an_attribute_updates_timestamps
  # touch: :written_on bumps the named column in addition to updated_at.
  topic = topics(:first)
  assert_equal 1, topic.replies_count
  previously_updated_at = topic.updated_at
  previously_written_on = topic.written_on
  travel(1.second) do
    topic.increment!(:replies_count, touch: :written_on)
  end
  assert_equal 2, topic.reload.replies_count
  assert_operator previously_updated_at, :<, topic.updated_at
  assert_operator previously_written_on, :<, topic.written_on
end

def test_increment_with_no_arg
  topic = topics(:first)
  assert_raises(ArgumentError) { topic.increment! }
end

def test_increment_new_record
  # increment! needs a persisted row; unsaved records raise without any SQL.
  topic = Topic.new
  assert_no_queries do
    assert_raises ActiveRecord::ActiveRecordError do
      topic.increment!(:replies_count)
    end
  end
end

def test_increment_destroyed_record
  # Same for destroyed records: raise, no queries issued.
  topic = topics(:first)
  topic.destroy
  assert_no_queries do
    assert_raises ActiveRecord::ActiveRecordError do
      topic.increment!(:replies_count)
    end
  end
end
# --- Class-level destroy with id arrays and composite primary keys ---

def test_destroy_many
  clients = Client.find([2, 3])
  assert_difference("Client.count", -2) do
    destroyed = Client.destroy([2, 3])
    assert_equal clients, destroyed
    # destroy returns the destroyed records, each frozen.
    assert destroyed.all?(&:frozen?), "destroyed clients should be frozen"
  end
end

def test_destroy_many_with_invalid_id
  # One bad id aborts the whole call; the valid records survive.
  clients = Client.find([2, 3])
  assert_raise(ActiveRecord::RecordNotFound) do
    Client.destroy([2, 3, 99999])
  end
  assert_equal clients, Client.find([2, 3])
end

def test_destroy_with_single_composite_primary_key
  # A composite-PK record's id is itself an array and is accepted as one key.
  book = cpk_books(:cpk_great_author_first_book)
  assert_difference("Cpk::Book.count", -1) do
    destroyed = Cpk::Book.destroy(book.id)
    assert_equal destroyed, book
  end
end

def test_destroy_with_multiple_composite_primary_keys
  books = [
    cpk_books(:cpk_great_author_first_book),
    cpk_books(:cpk_great_author_second_book),
  ]
  assert_difference("Cpk::Book.count", -2) do
    destroyed = Cpk::Book.destroy(books.map(&:id))
    assert_equal books.sort, destroyed.sort
    assert destroyed.all?(&:frozen?), "destroyed clients should be frozen"
  end
end

def test_destroy_with_invalid_ids_for_a_model_that_expects_composite_keys
  # Passing only the first half of each composite key cannot match a row.
  books = [
    cpk_books(:cpk_great_author_first_book),
    cpk_books(:cpk_great_author_second_book),
  ]
  assert_raise(ActiveRecord::RecordNotFound) do
    ids = books.map { |book| book.id.first }
    Cpk::Book.destroy(ids)
  end
end
# --- becomes / becomes! : re-casting a record as another class ---

def test_becomes
  # becomes produces an instance of the target class with the same attributes.
  assert_kind_of Reply, topics(:first).becomes(Reply)
  assert_equal "The First Topic", topics(:first).becomes(Reply).title
end

def test_becomes_after_reload_schema_from_cache
  Reply.define_attribute_methods
  Reply.serialize(:content) # invoke reload_schema_from_cache
  assert_kind_of Reply, topics(:first).becomes(Reply)
  assert_equal "The First Topic", topics(:first).becomes(Reply).title
end

def test_becomes_includes_errors
  # Validation errors recorded on the original carry over to the cast copy.
  company = Company.new(name: nil)
  assert_not_predicate company, :valid?
  original_errors = company.errors
  client = company.becomes(Client)
  assert_equal original_errors.attribute_names, client.errors.attribute_names
end

def test_becomes_errors_base
  # Errors copied into the subclass instance still accept new entries for
  # attributes that only exist on the subclass (store_accessor :foo).
  child_class = Class.new(Admin::User) do
    store_accessor :settings, :foo
    def self.name; "Admin::ChildUser"; end
  end
  admin = Admin::User.new
  admin.errors.add :token, :invalid
  child = admin.becomes(child_class)
  assert_equal [:token], child.errors.attribute_names
  assert_nothing_raised do
    child.errors.add :foo, :invalid
  end
end

def test_duped_becomes_persists_changes_from_the_original
  original = topics(:first)
  copy = original.dup.becomes(Reply)
  copy.save!
  assert_equal "The First Topic", Topic.find(copy.id).title
end

def test_becomes_wont_break_mutation_tracking
  # Dirty-tracking state is intact on both the source and the cast copy.
  topic = topics(:first)
  reply = topic.becomes(Reply)
  assert_equal 1, topic.id_in_database
  assert_empty topic.attributes_in_database
  assert_equal 1, reply.id_in_database
  assert_empty reply.attributes_in_database
end

def test_becomes_includes_changed_attributes
  company = Company.new(name: "37signals")
  client = company.becomes(Client)
  assert_equal "37signals", client.name
  assert_equal %w{name}, client.changed
end

def test_becomes_preserve_record_status
  # new_record?/persisted?/previously_new_record? reflect the source's state.
  company = Company.new(name: "37signals")
  client = company.becomes(Client)
  assert_predicate client, :new_record?
  company.save
  client = company.becomes(Client)
  assert_predicate client, :persisted?
  assert_predicate client, :previously_new_record?
end

def test_becomes_initializes_missing_attributes
  # Casting to a class with extra columns fills in their defaults.
  company = Company.new(name: "GrowingCompany")
  client = company.becomes(LargeClient)
  assert_equal 50, client.extra_size
end

def test_becomes_keeps_extra_attributes
  # Casting down to a narrower class does not drop the extra attribute values.
  client = LargeClient.new(name: "ShrinkingCompany")
  company = client.becomes(Company)
  assert_equal 50, company.extra_size
  assert_equal 50, client.extra_size
end

def test_becomes_same_class_makes_clone
  # becomes(SameClass) still returns a distinct object, not self.
  original = Company.new(name: "GrowingCompany")
  clone = original.becomes(Company)
  assert_instance_of Company, clone
  assert_not_equal original.object_id, clone.object_id
end
# --- Class-level delete with an id array, and decrement / decrement! ---

def test_delete_many
  original_count = Topic.count
  Topic.delete(deleting = [1, 2])
  assert_equal original_count - deleting.size, Topic.count
end

def test_decrement_attribute
  assert_equal 50, accounts(:signals37).credit_limit
  accounts(:signals37).decrement!(:credit_limit)
  assert_equal 49, accounts(:signals37, :reload).credit_limit
  # decrement is in-memory only; the chained decrement! persists both steps.
  accounts(:signals37).decrement(:credit_limit).decrement!(:credit_limit)
  assert_equal 47, accounts(:signals37, :reload).credit_limit
end

def test_decrement_attribute_by
  # The optional second argument is the step size.
  assert_equal 50, accounts(:signals37).credit_limit
  accounts(:signals37).decrement! :credit_limit, 5
  assert_equal 45, accounts(:signals37, :reload).credit_limit
  accounts(:signals37).decrement(:credit_limit, 1).decrement!(:credit_limit, 3)
  assert_equal 41, accounts(:signals37, :reload).credit_limit
end

def test_decrement_with_touch_updates_timestamps
  # touch: true bumps updated_at alongside the counter.
  topic = topics(:first)
  assert_equal 1, topic.replies_count
  previously_updated_at = topic.updated_at
  travel(1.second) do
    topic.decrement!(:replies_count, touch: true)
  end
  assert_equal 0, topic.reload.replies_count
  assert_operator previously_updated_at, :<, topic.updated_at
end

def test_decrement_with_touch_an_attribute_updates_timestamps
  # touch: :written_on bumps the named column in addition to updated_at.
  topic = topics(:first)
  assert_equal 1, topic.replies_count
  previously_updated_at = topic.updated_at
  previously_written_on = topic.written_on
  travel(1.second) do
    topic.decrement!(:replies_count, touch: :written_on)
  end
  assert_equal 0, topic.reload.replies_count
  assert_operator previously_updated_at, :<, topic.updated_at
  assert_operator previously_written_on, :<, topic.written_on
end
# --- create and build (single, many, with factory blocks) ---

def test_create
  topic = Topic.new
  topic.title = "New Topic"
  topic.save
  topic_reloaded = Topic.find(topic.id)
  assert_equal("New Topic", topic_reloaded.title)
end

def test_create_prefetched_pk
  # Model whose primary key is fetched before insert keeps that exact value.
  post = PostWithPrefetchedPk.create!(title: "New Message", body: "New Body")
  assert_equal 123456, post.id
end

def test_create_model_with_uuid_pk_populates_id
  # PostgreSQL-only: a DB-generated uuid primary key is read back into id.
  message = ChatMessage.create(content: "New Message")
  assert_not_nil message.id
  message_reloaded = ChatMessage.find(message.id)
  assert_equal "New Message", message_reloaded.content
end if current_adapter?(:PostgreSQLAdapter)

def test_create_model_with_custom_named_uuid_pk_populates_id
  # Same as above but the pk column is named message_id instead of id.
  message = ChatMessageCustomPk.create(content: "New Message")
  assert_not_nil message.message_id
  message_reloaded = ChatMessageCustomPk.find(message.message_id)
  assert_equal "New Message", message_reloaded.content
end if current_adapter?(:PostgreSQLAdapter)

def test_build
  # build assigns attributes without persisting.
  topic = Topic.build(title: "New Topic")
  assert_equal "New Topic", topic.title
  assert_not_predicate topic, :persisted?
end

def test_build_many
  topics = Topic.build([{ title: "first" }, { title: "second" }])
  assert_equal ["first", "second"], topics.map(&:title)
  topics.each { |topic| assert_not_predicate topic, :persisted? }
end

def test_build_through_factory_with_block
  # The block runs against each new record before it is returned.
  topic = Topic.build("title" => "New Topic") do |t|
    t.author_name = "David"
  end
  assert_equal("New Topic", topic.title)
  assert_equal("David", topic.author_name)
  assert_not_predicate topic, :persisted?
end

def test_build_many_through_factory_with_block
  topics = Topic.build([{ "title" => "first" }, { "title" => "second" }]) do |t|
    t.author_name = "David"
  end
  assert_equal 2, topics.size
  topics.each { |topic| assert_not_predicate topic, :persisted? }
  topic1, topic2 = topics
  assert_equal "first", topic1.title
  assert_equal "David", topic1.author_name
  assert_equal "second", topic2.title
  assert_equal "David", topic2.author_name
end
# --- save / save! edge cases ---

def test_save_valid_record
  topic = Topic.new(title: "New Topic")
  assert topic.save!
end

def test_save_invalid_record
  # save! wraps validation failures in RecordInvalid with the full message.
  reply = WrongReply.new(title: "New reply")
  error = assert_raise(ActiveRecord::RecordInvalid) { reply.save! }
  assert_equal "Validation failed: Content Empty", error.message
end

def test_save_destroyed_object
  # A destroyed record can never be saved again.
  topic = Topic.create!(title: "New Topic")
  topic.destroy!
  error = assert_raise(ActiveRecord::RecordNotSaved) { topic.save! }
  assert_equal "Failed to save the record", error.message
end

def test_save_null_string_attributes
  # The literal string "null" is stored verbatim, not coerced to NULL.
  topic = Topic.find(1)
  topic.attributes = { "title" => "null", "author_name" => "null" }
  topic.save!
  topic.reload
  assert_equal("null", topic.title)
  assert_equal("null", topic.author_name)
end

def test_save_nil_string_attributes
  topic = Topic.find(1)
  topic.title = nil
  topic.save!
  topic.reload
  assert_nil topic.title
end

def test_save_for_record_with_only_primary_key
  # A table with only a pk column can still be inserted.
  minimalistic = Minimalistic.new
  assert_nothing_raised { minimalistic.save }
end

def test_save_for_record_with_only_primary_key_that_is_provided
  assert_nothing_raised { Minimalistic.create!(id: 2) }
end

def test_save_with_duping_of_destroyed_object
  # dup resets the destroyed state, so the copy can be persisted fresh.
  developer = Developer.first
  developer.destroy
  new_developer = developer.dup
  new_developer.save
  assert_predicate new_developer, :persisted?
  assert_not_predicate new_developer, :destroyed?
end
# --- create with arrays and factory blocks; unknown-attribute handling ---

def test_create_many
  topics = Topic.create([ { "title" => "first" }, { "title" => "second" }])
  assert_equal 2, topics.size
  assert_equal "first", topics.first.title
end

def test_create_columns_not_equal_attributes
  # instantiate can carry attributes with no matching column; saving simply
  # ignores them rather than raising.
  topic = Topic.instantiate(
    "title" => "Another New Topic",
    "does_not_exist" => "test"
  )
  topic = topic.dup # reset @new_record
  assert_nothing_raised { topic.save }
  assert_predicate topic, :persisted?
  assert_equal "Another New Topic", topic.reload.title
end

def test_create_through_factory_with_block
  # The block runs against the new record before it is saved.
  topic = Topic.create("title" => "New Topic") do |t|
    t.author_name = "David"
  end
  assert_equal("New Topic", topic.title)
  assert_equal("David", topic.author_name)
end

def test_create_many_through_factory_with_block
  topics = Topic.create([ { "title" => "first" }, { "title" => "second" }]) do |t|
    t.author_name = "David"
  end
  assert_equal 2, topics.size
  topic1, topic2 = Topic.find(topics[0].id), Topic.find(topics[1].id)
  assert_equal "first", topic1.title
  assert_equal "David", topic1.author_name
  assert_equal "second", topic2.title
  assert_equal "David", topic2.author_name
end
# --- Basic update round-trips and STI type changes via becomes/becomes! ---

def test_update_object
  topic = Topic.new
  topic.title = "Another New Topic"
  topic.written_on = "2003-12-12 23:23:00"
  topic.save
  topic_reloaded = Topic.find(topic.id)
  assert_equal("Another New Topic", topic_reloaded.title)
  topic_reloaded.title = "Updated topic"
  topic_reloaded.save
  topic_reloaded_again = Topic.find(topic.id)
  assert_equal("Updated topic", topic_reloaded_again.title)
end

def test_update_columns_not_equal_attributes
  # Extra attributes with no matching column do not break a later save.
  topic = Topic.new
  topic.title = "Still another topic"
  topic.save
  topic_reloaded = Topic.instantiate(topic.attributes.merge("does_not_exist" => "test"))
  topic_reloaded.title = "A New Topic"
  assert_nothing_raised { topic_reloaded.save }
  assert_predicate topic_reloaded, :persisted?
  assert_equal "A New Topic", topic_reloaded.reload.title
end

def test_update_for_record_with_only_primary_key
  minimalistic = minimalistics(:first)
  assert_nothing_raised { minimalistic.save }
end

def test_update_sti_type
  # becomes! rewrites the STI type column, so the row reloads as the new class.
  assert_instance_of Reply, topics(:second)
  topic = topics(:second).becomes!(Topic)
  assert_instance_of Topic, topic
  topic.save!
  assert_instance_of Topic, Topic.find(topic.id)
end

def test_preserve_original_sti_type
  # Plain becomes keeps the type column untouched on both objects.
  reply = topics(:second)
  assert_equal "Reply", reply.type
  topic = reply.becomes(Topic)
  assert_equal "Reply", reply.type
  assert_instance_of Topic, topic
  assert_equal "Reply", topic.type
end

def test_update_sti_subclass_type
  assert_instance_of Topic, topics(:first)
  reply = topics(:first).becomes!(Reply)
  assert_instance_of Reply, reply
  reply.save!
  assert_instance_of Reply, Reply.find(reply.id)
end

def test_becomes_default_sti_subclass
  # With the column default switched to "Reply", fixtures load as Reply and
  # becomes(Topic) still casts back; the ensure restores the original default.
  original_type = Topic.columns_hash["type"].default
  ActiveRecord::Base.lease_connection.change_column_default :topics, :type, "Reply"
  Topic.reset_column_information
  reply = topics(:second)
  assert_instance_of Reply, reply
  topic = reply.becomes(Topic)
  assert_instance_of Topic, topic
ensure
  ActiveRecord::Base.lease_connection.change_column_default :topics, :type, original_type
  Topic.reset_column_information
end
# --- update_attribute / update inside lifecycle callbacks ---

def test_update_after_create
  # update_attribute from after_create persists the extra change.
  klass = Class.new(Topic) do
    def self.name; "Topic"; end
    after_create do
      update_attribute("author_name", "David")
    end
  end
  topic = klass.new
  topic.title = "Another New Topic"
  topic.save
  topic_reloaded = Topic.find(topic.id)
  assert_equal("Another New Topic", topic_reloaded.title)
  assert_equal("David", topic_reloaded.author_name)
end

def test_update_attribute_after_update
  klass = Class.new(Topic) do
    def self.name; "Topic"; end
    after_update :update_author, if: :saved_change_to_title?
    def update_author
      update_attribute("author_name", "David")
    end
  end
  topic = klass.create(title: "New Topic")
  topic.update(title: "Another Topic")
  topic_reloaded = Topic.find(topic.id)
  assert_equal("Another Topic", topic_reloaded.title)
  assert_equal("David", topic_reloaded.author_name)
end

def test_update_attribute_in_before_validation_respects_callback_chain
  # update_attribute fired from before_validation must not run the
  # after_create/after_update hooks more than once overall.
  klass = Class.new(Topic) do
    def self.name; "Topic"; end
    before_validation :set_author_name
    after_create :track_create
    after_update :call_once, if: :saved_change_to_author_name?
    attr_reader :counter
    def set_author_name
      update_attribute :author_name, "David"
    end
    def track_create
      call_once if saved_change_to_author_name?
    end
    def call_once
      @counter ||= 0
      @counter += 1
    end
  end
  comment = klass.create(title: "New Topic", author_name: "Not David")
  assert_equal 1, comment.counter
end

def test_update_attribute_does_not_run_sql_if_attribute_is_not_changed
  topic = Topic.create(title: "Another New Topic")
  assert_no_queries do
    assert topic.update_attribute(:title, "Another New Topic")
  end
end

def test_update_does_not_run_sql_if_record_has_not_changed
  topic = Topic.create(title: "Another New Topic")
  assert_no_queries do
    assert topic.update(title: "Another New Topic")
  end
end
# --- Instance-level delete / destroy / destroy! ---

def test_delete
  topic = Topic.find(1)
  # delete returns self and freezes the record.
  assert_equal topic, topic.delete, "topic.delete did not return self"
  assert_predicate topic, :frozen?, "topic not frozen after delete"
  assert_raise(ActiveRecord::RecordNotFound) { Topic.find(topic.id) }
end

def test_delete_doesnt_run_callbacks
  # Unlike destroy, delete bypasses callbacks/associations, so topic 2 stays.
  Topic.find(1).delete
  assert_not_nil Topic.find(2)
end

def test_delete_isnt_affected_by_scoping
  topic = Topic.find(1)
  assert_difference("Topic.count", -1) do
    Topic.where("1=0").scoping { topic.delete }
  end
end

def test_destroy
  topic = Topic.find(1)
  assert_equal topic, topic.destroy, "topic.destroy did not return self"
  assert_predicate topic, :frozen?, "topic not frozen after destroy"
  assert_raise(ActiveRecord::RecordNotFound) { Topic.find(topic.id) }
end

def test_destroy!
  topic = Topic.find(1)
  assert_equal topic, topic.destroy!, "topic.destroy! did not return self"
  assert_predicate topic, :frozen?, "topic not frozen after destroy!"
  assert_raise(ActiveRecord::RecordNotFound) { Topic.find(topic.id) }
end

def test_destroy_for_a_failed_to_destroy_cpk_record
  # The RecordNotDestroyed message includes the composite key columns.
  book = cpk_books(:cpk_great_author_first_book)
  book.fail_destroy = true
  assert_raises(ActiveRecord::RecordNotDestroyed, match: /Failed to destroy Cpk::Book with \["author_id", "id"\]=/) do
    book.destroy!
  end
end
# find, class-level update, and class-level destroy all raise
# RecordNotFound for an id that does not exist.
def test_find_raises_record_not_found_exception
  assert_raise(ActiveRecord::RecordNotFound) do
    Topic.find(99999)
  end
end

def test_update_raises_record_not_found_exception
  assert_raise(ActiveRecord::RecordNotFound) do
    Topic.update(99999, approved: true)
  end
end

def test_destroy_raises_record_not_found_exception
  assert_raise(ActiveRecord::RecordNotFound) do
    Topic.destroy(99999)
  end
end
# --- update_all: raw SQL, bind params, hashes, and Arel values ---

def test_update_all
  # update_all returns the number of affected rows (all of them here).
  assert_equal Topic.count, Topic.update_all("content = 'bulk updated!'")
  assert_equal "bulk updated!", Topic.find(1).content
  assert_equal "bulk updated!", Topic.find(2).content
  assert_equal Topic.count, Topic.update_all(["content = ?", "bulk updated again!"])
  assert_equal "bulk updated again!", Topic.find(1).content
  assert_equal "bulk updated again!", Topic.find(2).content
  # A nil bind sets the column to NULL.
  assert_equal Topic.count, Topic.update_all(["content = ?", nil])
  assert_nil Topic.find(1).content
end

def test_update_all_with_hash
  assert_not_nil Topic.find(1).last_read
  assert_equal Topic.count, Topic.update_all(content: "bulk updated with hash!", last_read: nil)
  assert_equal "bulk updated with hash!", Topic.find(1).content
  assert_equal "bulk updated with hash!", Topic.find(2).content
  assert_nil Topic.find(1).last_read
  assert_nil Topic.find(2).last_read
end

def test_update_all_with_custom_sql_as_value
  # Arel.sql marks the value as a SQL fragment (a correlated subquery here)
  # instead of a literal string to be quoted.
  person = people(:michael)
  person.update!(cars_count: 0)
  Person.update_all(cars_count: Arel.sql(<<~SQL))
    select count(*) from cars where cars.person_id = people.id
  SQL
  assert_equal 1, person.reload.cars_count
end
# --- delete/destroy freeze the record, even new ones; associations survive ---

def test_delete_new_record
  client = Client.new(name: "37signals")
  client.delete
  # A deleted record is frozen: saves fail and attribute writes raise.
  assert_predicate client, :frozen?
  assert_not client.save
  assert_raise(ActiveRecord::RecordNotSaved) { client.save! }
  assert_predicate client, :frozen?
  assert_raise(RuntimeError) { client.name = "something else" }
end

def test_delete_record_with_associations
  client = Client.find(3)
  client.delete
  assert_predicate client, :frozen?
  # Already-loaded associations remain readable after delete.
  assert_kind_of Firm, client.firm
  assert_not client.save
  assert_raise(ActiveRecord::RecordNotSaved) { client.save! }
  assert_predicate client, :frozen?
  assert_raise(RuntimeError) { client.name = "something else" }
end

def test_destroy_new_record
  client = Client.new(name: "37signals")
  client.destroy
  assert_predicate client, :frozen?
  assert_not client.save
  assert_raise(ActiveRecord::RecordNotSaved) { client.save! }
  assert_predicate client, :frozen?
  assert_raise(RuntimeError) { client.name = "something else" }
end

def test_destroy_record_with_associations
  client = Client.find(3)
  client.destroy
  assert_predicate client, :frozen?
  assert_kind_of Firm, client.firm
  assert_not client.save
  assert_raise(ActiveRecord::RecordNotSaved) { client.save! }
  assert_predicate client, :frozen?
  assert_raise(RuntimeError) { client.name = "something else" }
end
# --- update_attribute / update_attribute! (skip validations, run callbacks) ---

def test_update_attribute
  assert_not_predicate Topic.find(1), :approved?
  Topic.find(1).update_attribute("approved", true)
  assert_predicate Topic.find(1), :approved?
  Topic.find(1).update_attribute(:approved, false)
  assert_not_predicate Topic.find(1), :approved?
  # Callbacks still run: this before_save hook flips approved back to true.
  Topic.find(1).update_attribute(:change_approved_before_save, true)
  assert_predicate Topic.find(1), :approved?
end

def test_update_attribute_for_readonly_attribute
  minivan = Minivan.find("m1")
  assert_raises(ActiveRecord::ActiveRecordError) { minivan.update_attribute(:color, "black") }
end

def test_update_attribute_with_one_updated
  # After update_attribute the record is clean (no pending changes).
  t = Topic.first
  t.update_attribute(:title, "super_title")
  assert_equal "super_title", t.title
  assert_not t.changed?, "topic should not have changed"
  assert_not t.title_changed?, "title should not have changed"
  assert_nil t.title_change, "title change should be nil"
  t.reload
  assert_equal "super_title", t.title
end

def test_update_attribute_for_updated_at_on
  # Explicitly setting updated_at wins; a later normal update refreshes it.
  developer = Developer.find(1)
  prev_month = Time.now.prev_month.change(usec: 0)
  developer.update_attribute(:updated_at, prev_month)
  assert_equal prev_month, developer.updated_at
  developer.update_attribute(:salary, 80001)
  assert_not_equal prev_month, developer.updated_at
  developer.reload
  assert_not_equal prev_month, developer.updated_at
end

def test_update_attribute!
  assert_not_predicate Topic.find(1), :approved?
  Topic.find(1).update_attribute!("approved", true)
  assert_predicate Topic.find(1), :approved?
  Topic.find(1).update_attribute!(:approved, false)
  assert_not_predicate Topic.find(1), :approved?
  Topic.find(1).update_attribute!(:change_approved_before_save, true)
  assert_predicate Topic.find(1), :approved?
end

def test_update_attribute_for_readonly_attribute!
  minivan = Minivan.find("m1")
  assert_raises(ActiveRecord::ActiveRecordError) { minivan.update_attribute!(:color, "black") }
end

def test_update_attribute_with_one_updated!
  t = Topic.first
  t.update_attribute!(:title, "super_title")
  assert_equal "super_title", t.title
  assert_not t.changed?, "topic should not have changed"
  assert_not t.title_changed?, "title should not have changed"
  assert_nil t.title_change, "title change should be nil"
  t.reload
  assert_equal "super_title", t.title
end

def test_update_attribute_for_updated_at_on!
  developer = Developer.find(1)
  prev_month = Time.now.prev_month.change(usec: 0)
  developer.update_attribute!(:updated_at, prev_month)
  assert_equal prev_month, developer.updated_at
  developer.update_attribute!(:salary, 80001)
  assert_not_equal prev_month, developer.updated_at
  developer.reload
  assert_not_equal prev_month, developer.updated_at
end

def test_update_attribute_for_aborted_callback!
  # Unlike update_attribute, the bang variant raises when a callback aborts,
  # and the row is left untouched.
  klass = Class.new(Topic) do
    def self.name; "Topic"; end
    before_update :throw_abort
    def throw_abort
      throw(:abort)
    end
  end
  t = klass.create(title: "New Topic", author_name: "Not David")
  assert_raises(ActiveRecord::RecordNotSaved) { t.update_attribute!(:title, "super_title") }
  t_reloaded = Topic.find(t.id)
  assert_equal "New Topic", t_reloaded.title
end
# --- update_column (skip validations, callbacks, and timestamping) ---

def test_update_column
  topic = Topic.find(1)
  topic.update_column("approved", true)
  assert_predicate topic, :approved?
  topic.reload
  assert_predicate topic, :approved?
  topic.update_column(:approved, false)
  assert_not_predicate topic, :approved?
  topic.reload
  assert_not_predicate topic, :approved?
end

def test_update_column_touch_option
  # touch: true opts back into updating updated_at.
  topic = Topic.find(1)
  assert_changes -> { topic.updated_at } do
    travel(1.second) do
      topic.update_column(:title, "super_title", touch: true)
    end
  end
end

def test_update_column_touch_option_with_specific_time
  # touch: { time: ... } uses the given timestamp instead of the current time.
  topic = Topic.find(1)
  new_updated_at = Date.parse("2024-03-31 12:00:00")
  assert_changes -> { topic.updated_at }, to: new_updated_at do
    topic.update_column(:title, "super_title", touch: { time: new_updated_at })
  end
end

def test_update_column_should_not_use_setter_method
  # The value is written directly; a custom salary= (which doubles) is bypassed.
  dev = Developer.find(1)
  dev.instance_eval { def salary=(value); write_attribute(:salary, value * 2); end }
  dev.update_column(:salary, 80000)
  assert_equal 80000, dev.salary
  dev.reload
  assert_equal 80000, dev.salary
end

def test_update_column_should_raise_exception_if_new_record
  topic = Topic.new
  assert_raises(ActiveRecord::ActiveRecordError) { topic.update_column("approved", false) }
end

def test_update_column_should_not_leave_the_object_dirty
  topic = Topic.find(1)
  topic.update_column("content", "--- Have a nice day\n...\n")
  topic.reload
  topic.update_column(:content, "--- You too\n...\n")
  assert_equal [], topic.changed
  topic.reload
  topic.update_column("content", "--- Have a nice day\n...\n")
  assert_equal [], topic.changed
end

def test_update_column_with_model_having_primary_key_other_than_id
  minivan = Minivan.find("m1")
  new_name = "sebavan"
  minivan.update_column(:name, new_name)
  assert_equal new_name, minivan.name
end

def test_update_column_for_readonly_attribute
  minivan = Minivan.find("m1")
  prev_color = minivan.color
  assert_raises(ActiveRecord::ActiveRecordError) { minivan.update_column(:color, "black") }
  assert_equal prev_color, minivan.color
end

def test_update_column_should_not_modify_updated_at
  # Without touch, update_column leaves updated_at exactly as set.
  developer = Developer.find(1)
  prev_month = Time.now.prev_month.change(usec: 0)
  developer.update_column(:updated_at, prev_month)
  assert_equal prev_month, developer.updated_at
  developer.update_column(:salary, 80001)
  assert_equal prev_month, developer.updated_at
  developer.reload
  assert_equal prev_month.to_i, developer.updated_at.to_i
end

def test_update_column_with_one_changed_and_one_updated
  # Only the named column is written; other in-memory changes stay dirty.
  t = Topic.order("id").limit(1).first
  author_name = t.author_name
  t.author_name = "John"
  t.update_column(:title, "super_title")
  assert_equal "John", t.author_name
  assert_equal "super_title", t.title
  assert_predicate t, :changed?, "topic should have changed"
  assert_predicate t, :author_name_changed?, "author_name should have changed"
  t.reload
  assert_equal author_name, t.author_name
  assert_equal "super_title", t.title
end

def test_update_column_with_default_scope
  # The write succeeds even when the new value falls outside the default scope.
  developer = DeveloperCalledDavid.first
  developer.name = "John"
  developer.save!
  assert developer.update_column(:name, "Will"), "did not update record due to default scope"
end
# --- update_columns (multi-column variant of update_column) ---

def test_update_columns
  topic = Topic.find(1)
  topic.update_columns("approved" => true, title: "Sebastian Topic")
  assert_predicate topic, :approved?
  assert_equal "Sebastian Topic", topic.title
  topic.reload
  assert_predicate topic, :approved?
  assert_equal "Sebastian Topic", topic.title
end

def test_update_columns_touch_option_updates_timestamps
  topic = Topic.find(1)
  assert_changes -> { topic.updated_at } do
    travel(1.second) do
      topic.update_columns(title: "super_title", touch: true)
    end
  end
end

def test_update_columns_touch_option_explicit_column_names
  # touch: :written_on also bumps the named column.
  topic = Topic.find(1)
  assert_changes -> { [topic.updated_at, topic.written_on] } do
    travel(1.second) do
      topic.update_columns(title: "super_title", touch: :written_on)
    end
  end
end

def test_update_columns_touch_option_not_overwrite_explicit_attribute
  # An explicitly supplied updated_at beats the touch-generated timestamp.
  topic = Topic.find(1)
  new_updated_at = Date.parse("2024-03-31 12:00:00")
  assert_changes -> { topic.updated_at }, to: new_updated_at do
    topic.update_columns(title: "super_title", updated_at: new_updated_at, touch: true)
  end
end

def test_update_columns_touch_option_not_overwrite_explicit_attribute_with_string_key
  # Same precedence when the attribute is given with a string key.
  topic = Topic.find(1)
  new_updated_at = Date.parse("2024-03-31 12:00:00")
  assert_changes -> { topic.updated_at }, to: new_updated_at do
    topic.update_columns(title: "super_title", "updated_at" => new_updated_at, touch: true)
  end
end

def test_update_columns_touch_option_with_specific_time
  topic = Topic.find(1)
  new_updated_at = Date.parse("2024-03-31 12:00:00")
  assert_changes -> { topic.updated_at }, to: new_updated_at do
    topic.update_columns(title: "super_title", touch: { time: new_updated_at })
  end
end

def test_update_columns_should_not_use_setter_method
  # Values are written directly; a custom salary= (which doubles) is bypassed.
  dev = Developer.find(1)
  dev.instance_eval { def salary=(value); write_attribute(:salary, value * 2); end }
  dev.update_columns(salary: 80000)
  assert_equal 80000, dev.salary
  dev.reload
  assert_equal 80000, dev.salary
end

def test_update_columns_should_raise_exception_if_new_record
  topic = Topic.new
  assert_raises(ActiveRecord::ActiveRecordError) { topic.update_columns(approved: false) }
end

def test_update_columns_should_not_leave_the_object_dirty
  topic = Topic.find(1)
  topic.update("content" => "--- Have a nice day\n...\n", :author_name => "Jose")
  topic.reload
  topic.update_columns(content: "--- You too\n...\n", "author_name" => "Sebastian")
  assert_equal [], topic.changed
  topic.reload
  topic.update_columns(content: "--- Have a nice day\n...\n", author_name: "Jose")
  assert_equal [], topic.changed
end

def test_update_columns_with_model_having_primary_key_other_than_id
  minivan = Minivan.find("m1")
  new_name = "sebavan"
  minivan.update_columns(name: new_name)
  assert_equal new_name, minivan.name
end

def test_update_columns_with_one_readonly_attribute
  # One readonly attribute in the hash aborts the whole write.
  minivan = Minivan.find("m1")
  prev_color = minivan.color
  prev_name = minivan.name
  assert_raises(ActiveRecord::ActiveRecordError) { minivan.update_columns(name: "My old minivan", color: "black") }
  assert_equal prev_color, minivan.color
  assert_equal prev_name, minivan.name
  minivan.reload
  assert_equal prev_color, minivan.color
  assert_equal prev_name, minivan.name
end

def test_update_columns_should_not_modify_updated_at
  developer = Developer.find(1)
  prev_month = Time.now.prev_month.change(usec: 0)
  developer.update_columns(updated_at: prev_month)
  assert_equal prev_month, developer.updated_at
  developer.update_columns(salary: 80000)
  assert_equal prev_month, developer.updated_at
  assert_equal 80000, developer.salary
  developer.reload
  assert_equal prev_month.to_i, developer.updated_at.to_i
  assert_equal 80000, developer.salary
end

def test_update_columns_with_one_changed_and_one_updated
  # Only the given columns are written; other in-memory changes stay dirty.
  t = Topic.order("id").limit(1).first
  author_name = t.author_name
  t.author_name = "John"
  t.update_columns(title: "super_title")
  assert_equal "John", t.author_name
  assert_equal "super_title", t.title
  assert_predicate t, :changed?, "topic should have changed"
  assert_predicate t, :author_name_changed?, "author_name should have changed"
  t.reload
  assert_equal author_name, t.author_name
  assert_equal "super_title", t.title
end

def test_update_columns_changing_id
  # Even the primary key can be rewritten this way.
  topic = Topic.find(1)
  topic.update_columns(id: 123)
  assert_equal 123, topic.id
  topic.reload
  assert_equal 123, topic.id
end

def test_update_columns_returns_boolean
  topic = Topic.find(1)
  assert_equal true, topic.update_columns(title: "New title")
end

def test_update_columns_with_default_scope
  # The write succeeds even when the new value falls outside the default scope.
  developer = DeveloperCalledDavid.first
  developer.name = "John"
  developer.save!
  assert developer.update_columns(name: "Will"), "did not update record due to default scope"
end
# --- Instance-level update / update! ---

def test_update
  topic = Topic.find(1)
  assert_not_predicate topic, :approved?
  assert_equal "The First Topic", topic.title
  topic.update("approved" => true, "title" => "The First Topic Updated")
  topic.reload
  assert_predicate topic, :approved?
  assert_equal "The First Topic Updated", topic.title
  topic.update(approved: false, title: "The First Topic")
  topic.reload
  assert_not_predicate topic, :approved?
  assert_equal "The First Topic", topic.title
  # Changing the pk to an existing id violates uniqueness; the adapter error
  # is wrapped, with the original exception preserved as the cause.
  error = assert_raise(ActiveRecord::RecordNotUnique, ActiveRecord::StatementInvalid) do
    topic.update(id: 3, title: "Hm is it possible?")
  end
  assert_not_nil error.cause
  assert_not_equal "Hm is it possible?", Topic.find(3).title
  # Changing the pk to a free id moves the row.
  topic.update(id: 1234)
  assert_nothing_raised { topic.reload }
  assert_equal topic.title, Topic.find(1234).title
end

def test_update_parameters
  # An empty hash is a no-op; nil is a programmer error.
  topic = Topic.find(1)
  assert_nothing_raised do
    topic.update({})
  end
  assert_raises(ArgumentError) do
    topic.update(nil)
  end
end

def test_update!
  # update! round-trips valid changes and raises on validation failure;
  # the temporary validator is removed in the ensure.
  Reply.validates_presence_of(:title)
  reply = Reply.find(2)
  assert_equal "The Second Topic of the day", reply.title
  assert_equal "Have a nice day", reply.content
  reply.update!("title" => "The Second Topic of the day updated", "content" => "Have a nice evening")
  reply.reload
  assert_equal "The Second Topic of the day updated", reply.title
  assert_equal "Have a nice evening", reply.content
  reply.update!(title: "The Second Topic of the day", content: "Have a nice day")
  reply.reload
  assert_equal "The Second Topic of the day", reply.title
  assert_equal "Have a nice day", reply.content
  assert_raise(ActiveRecord::RecordInvalid) { reply.update!(title: nil, content: "Have a nice evening") }
ensure
  Reply.clear_validators!
end
def test_destroyed_returns_boolean
developer = Developer.first
assert_equal false, developer.destroyed?
developer.destroy
assert_equal true, developer.destroyed?
developer = Developer.last
assert_equal false, developer.destroyed?
developer.delete
assert_equal true, developer.destroyed?
end
def test_persisted_returns_boolean
developer = Developer.new(name: "Jose")
assert_equal false, developer.persisted?
developer.save!
assert_equal true, developer.persisted?
developer = Developer.first
assert_equal true, developer.persisted?
developer.destroy
assert_equal false, developer.persisted?
developer = Developer.last
assert_equal true, developer.persisted?
developer.delete
assert_equal false, developer.persisted?
end
def test_class_level_destroy
should_be_destroyed_reply = Reply.create("title" => "hello", "content" => "world")
Topic.find(1).replies << should_be_destroyed_reply
topic = Topic.destroy(1)
assert_predicate topic, :destroyed?
assert_raise(ActiveRecord::RecordNotFound) { Topic.find(1) }
assert_raise(ActiveRecord::RecordNotFound) { Reply.find(should_be_destroyed_reply.id) }
end
def test_class_level_destroy_is_affected_by_scoping
should_not_be_destroyed_reply = Reply.create("title" => "hello", "content" => "world")
Topic.find(1).replies << should_not_be_destroyed_reply
assert_raise(ActiveRecord::RecordNotFound) do
Topic.where("1=0").scoping { Topic.destroy(1) }
end
assert_nothing_raised { Topic.find(1) }
assert_nothing_raised { Reply.find(should_not_be_destroyed_reply.id) }
end
def test_class_level_delete
should_not_be_destroyed_reply = Reply.create("title" => "hello", "content" => "world")
Topic.find(1).replies << should_not_be_destroyed_reply
Topic.delete(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.find(1) }
assert_nothing_raised { Reply.find(should_not_be_destroyed_reply.id) }
end
def test_class_level_delete_with_invalid_ids
assert_no_queries do
assert_equal 0, Topic.delete(nil)
assert_equal 0, Topic.delete([])
end
assert_difference -> { Topic.count }, -1 do
assert_equal 1, Topic.delete(topics(:first).id)
end
end
def test_class_level_delete_is_affected_by_scoping
should_not_be_destroyed_reply = Reply.create("title" => "hello", "content" => "world")
Topic.find(1).replies << should_not_be_destroyed_reply
Topic.where("1=0").scoping { Topic.delete(1) }
assert_nothing_raised { Topic.find(1) }
assert_nothing_raised { Reply.find(should_not_be_destroyed_reply.id) }
end
def test_create_with_custom_timestamps
custom_datetime = 1.hour.ago.beginning_of_day
%w(created_at created_on updated_at updated_on).each do |attribute|
parrot = LiveParrot.create(:name => "colombian", attribute => custom_datetime)
assert_equal custom_datetime, parrot[attribute]
end
end
def test_persist_inherited_class_with_different_table_name
minimalistic_aircrafts = Class.new(Minimalistic) do
self.table_name = "aircraft"
end
assert_difference "Aircraft.count", 1 do
aircraft = minimalistic_aircrafts.create(name: "Wright Flyer")
aircraft.name = "Wright Glider"
aircraft.save
end
assert_equal "Wright Glider", Aircraft.last.name
end
def test_instantiate_creates_a_new_instance
post = Post.instantiate("title" => "appropriate documentation", "type" => "SpecialPost")
assert_equal "appropriate documentation", post.title
assert_instance_of SpecialPost, post
# body was not initialized
assert_raises ActiveModel::MissingAttributeError do
post.body
end
end
def test_reload_removes_custom_selects
post = Post.select("posts.*, 1 as wibble").last!
assert_equal 1, post[:wibble]
assert_nil post.reload[:wibble]
end
def test_find_via_reload
post = Post.new
assert_predicate post, :new_record?
post.id = 1
post.reload
assert_equal "Welcome to the weblog", post.title
assert_not_predicate post, :new_record?
end
def test_reload_via_querycache
ActiveRecord::Base.lease_connection.enable_query_cache!
ActiveRecord::Base.lease_connection.clear_query_cache
assert ActiveRecord::Base.lease_connection.query_cache_enabled, "cache should be on"
parrot = Parrot.create(name: "Shane")
# populate the cache with the SELECT result
found_parrot = Parrot.find(parrot.id)
assert_equal parrot.id, found_parrot.id
# Manually update the 'name' attribute in the DB directly
assert_equal 1, ActiveRecord::Base.lease_connection.query_cache.size
ActiveRecord::Base.uncached do
found_parrot.name = "Mary"
found_parrot.save
end
# Now reload, and verify that it gets the DB version, and not the querycache version
found_parrot.reload
assert_equal "Mary", found_parrot.name
found_parrot = Parrot.find(parrot.id)
assert_equal "Mary", found_parrot.name
ensure
ActiveRecord::Base.lease_connection.disable_query_cache!
end
def test_save_touch_false
parrot = Parrot.create!(
name: "Bob",
created_at: 1.day.ago,
updated_at: 1.day.ago)
created_at = parrot.created_at
updated_at = parrot.updated_at
parrot.name = "Barb"
parrot.save!(touch: false)
assert_equal parrot.created_at, created_at
assert_equal parrot.updated_at, updated_at
end
def test_reset_column_information_resets_children
child_class = Class.new(Topic)
child_class.new # force schema to load
ActiveRecord::Base.lease_connection.add_column(:topics, :foo, :string)
Topic.reset_column_information
# this should redefine attribute methods
child_class.new
assert child_class.instance_methods.include?(:foo)
assert child_class.instance_methods.include?(:foo_changed?)
assert_equal "bar", child_class.new(foo: :bar).foo
ensure
ActiveRecord::Base.lease_connection.remove_column(:topics, :foo)
Topic.reset_column_information
end
def test_update_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
sql = capture_sql { clothing_item.update(description: "Lovely green t-shirt") }.second
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_save_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
clothing_item.description = "Lovely green t-shirt"
sql = capture_sql { clothing_item.save }.second
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_reload_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
sql = capture_sql { clothing_item.reload }.first
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_destroy_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
sql = capture_sql { clothing_item.destroy }.second
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_delete_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
sql = capture_sql { clothing_item.delete }.first
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_update_attribute_uses_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
sql = capture_sql { clothing_item.update_attribute(:description, "Lovely green t-shirt") }.second
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
end
def test_it_is_possible_to_update_parts_of_the_query_constraints_config
clothing_item = clothing_items(:green_t_shirt)
clothing_item.color = "blue"
clothing_item.description = "Now it's a blue t-shirt"
sql = capture_sql { clothing_item.save }.second
assert_match(/WHERE .*clothing_type/, sql)
assert_match(/WHERE .*color/, sql)
assert_equal("blue", ClothingItem.find_by(id: clothing_item.id).color)
end
def test_model_with_no_auto_populated_fields_still_returns_primary_key_after_insert
record = PkAutopopulatedByATriggerRecord.create
assert_not_nil record.id
assert record.id > 0
end if supports_insert_returning? && !current_adapter?(:SQLite3Adapter)
end
class QueryConstraintsTest < ActiveRecord::TestCase
fixtures :clothing_items, :dashboards, :topics, :posts
def test_primary_key_stays_the_same
assert_equal("id", ClothingItem.primary_key)
end
def test_query_constraints_list_is_nil_if_primary_key_is_nil
klass = Class.new(ActiveRecord::Base) do
self.table_name = "developers_projects"
end
assert_nil klass.primary_key
assert_nil klass.query_constraints_list
end
def test_query_constraints_list_is_nil_for_non_cpk_model
assert_nil Post.query_constraints_list
assert_nil Dashboard.query_constraints_list
end
def test_query_constraints_list_equals_to_composite_primary_key
assert_equal(["shop_id", "id"], Cpk::Order.query_constraints_list)
assert_equal(["author_id", "id"], Cpk::Book.query_constraints_list)
end
def test_child_keeps_parents_query_constraints
clothing_item = clothing_items(:green_t_shirt)
assert_uses_query_constraints_on_reload(clothing_item, ["clothing_type", "color"])
used_clothing_item = clothing_items(:used_blue_jeans)
assert_uses_query_constraints_on_reload(used_clothing_item, ["clothing_type", "color"])
end
def test_child_keeps_parents_query_constraints_derived_from_composite_pk
assert_equal(["author_id", "id"], Cpk::BestSeller.query_constraints_list)
end
def assert_uses_query_constraints_on_reload(object, columns)
flunk("columns argument must not be empty") if columns.blank?
sql = capture_sql { object.reload }.first
Array(columns).each do |column|
assert_match(/WHERE .*#{column}/, sql)
end
end
def test_query_constraints_raises_an_error_when_no_columns_provided
assert_raises(ArgumentError) do
Class.new(ActiveRecord::Base) do
self.table_name = "topics"
query_constraints
end
end
end
def test_child_class_with_query_constraints_overrides_parents
assert_equal(["clothing_type", "color", "size"], ClothingItem::Sized.query_constraints_list)
end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/persistence_test.rb |
import { test, expect } from "@playwright/test";
import { PlaywrightFixture } from "./helpers/playwright-fixture.js";
import type { Fixture, AppFixture } from "./helpers/create-fixture.js";
import {
createAppFixture,
createFixture,
js,
} from "./helpers/create-fixture.js";
let fixture: Fixture;
let appFixture: AppFixture;
test.beforeAll(async () => {
fixture = await createFixture({
files: {
"app/routes/_index.tsx": js`
import { Form, useLoaderData, useActionData } from "react-router";
async function requestToJson(request) {
let body = null;
if (request.body) {
let fd = await request.formData();
body = Object.fromEntries(fd.entries());
}
return {
method: request.method,
url: request.url,
headers: Object.fromEntries(request.headers.entries()),
body,
};
}
export async function loader({ request }) {
return requestToJson(request);
}
export function action({ request }) {
return requestToJson(request);
}
export default function Index() {
let loaderData = useLoaderData();
let actionData = useActionData();
return (
<div>
<button id="set-cookie" onClick={() => {
document.cookie = 'cookie=nomnom; path=/';
}}>
Set Cookie
</button>
<Form method="get" reloadDocument>
<button type="submit" id="submit-get-ssr" name="type" value="ssr">
SSR GET
</button>
</Form>
<Form method="get">
<button type="submit" id="submit-get-csr" name="type" value="csr">
CSR GET
</button>
</Form>
<Form method="post" reloadDocument>
<button type="submit" id="submit-post-ssr" name="type" value="ssr">
SSR POST
</button>
</Form>
<Form method="post">
<button type="submit" id="submit-post-csr" name="type" value="csr">
CSR POST
</button>
</Form>
<pre id="loader-data">{JSON.stringify(loaderData)}</pre>
{actionData ?
<pre id="action-data">{JSON.stringify(actionData)}</pre> :
null}
</div>
)
}
`,
},
});
appFixture = await createAppFixture(fixture);
});
test.afterAll(() => appFixture.close());
test("loader request on SSR GET requests", async ({ page }) => {
let app = new PlaywrightFixture(appFixture, page);
await app.goto("/");
await app.clickElement("#set-cookie");
let loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual(undefined);
expect(loaderData.body).toEqual(null);
await app.clickElement("#submit-get-ssr");
loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/\?type=ssr$/);
expect(loaderData.headers.cookie).toEqual("cookie=nomnom");
expect(loaderData.body).toEqual(null);
});
test("loader request on CSR GET requests", async ({ page }) => {
let app = new PlaywrightFixture(appFixture, page);
await app.goto("/");
await app.clickElement("#set-cookie");
let loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual(undefined);
expect(loaderData.body).toEqual(null);
await app.clickElement("#submit-get-csr");
loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/\?type=csr$/);
expect(loaderData.headers.cookie).toEqual("cookie=nomnom");
expect(loaderData.body).toEqual(null);
});
test("action + loader requests SSR POST requests", async ({ page }) => {
let app = new PlaywrightFixture(appFixture, page);
await app.goto("/");
await app.clickElement("#set-cookie");
let loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual(undefined);
expect(loaderData.body).toEqual(null);
await app.clickElement("#submit-post-ssr");
let actionData = JSON.parse(await page.locator("#action-data").innerHTML());
expect(actionData.method).toEqual("POST");
expect(actionData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(actionData.headers.cookie).toEqual("cookie=nomnom");
expect(actionData.body).toEqual({ type: "ssr" });
loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual("cookie=nomnom");
expect(loaderData.body).toEqual(null);
});
test("action + loader requests on CSR POST requests", async ({ page }) => {
let app = new PlaywrightFixture(appFixture, page);
await app.goto("/");
await app.clickElement("#set-cookie");
let loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual(undefined);
expect(loaderData.body).toEqual(null);
await app.clickElement("#submit-post-csr");
let actionData = JSON.parse(await page.locator("#action-data").innerHTML());
expect(actionData.method).toEqual("POST");
expect(actionData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(actionData.headers.cookie).toEqual("cookie=nomnom");
expect(actionData.body).toEqual({ type: "csr" });
loaderData = JSON.parse(await page.locator("#loader-data").innerHTML());
expect(loaderData.method).toEqual("GET");
expect(loaderData.url).toMatch(/^http:\/\/localhost:\d+\/$/);
expect(loaderData.headers.cookie).toEqual("cookie=nomnom");
expect(loaderData.body).toEqual(null);
}); | typescript | github | https://github.com/remix-run/react-router | integration/request-test.ts |
package kotlinx.coroutines.test
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlin.test.*
@Suppress("DEPRECATION", "DEPRECATION_ERROR")
class TestRunBlockingOrderTest: OrderedExecutionTestBase() {
@Test
fun testLaunchImmediate() = runBlockingTest {
expect(1)
launch {
expect(2)
}
finish(3)
}
@Test
fun testYield() = runBlockingTest {
expect(1)
launch {
expect(2)
yield()
finish(4)
}
expect(3)
}
@Test
fun testLaunchWithDelayCompletes() = runBlockingTest {
expect(1)
launch {
delay(100)
finish(3)
}
expect(2)
}
@Test
fun testLaunchDelayOrdered() = runBlockingTest {
expect(1)
launch {
delay(200) // long delay
finish(4)
}
launch {
delay(100) // shorter delay
expect(3)
}
expect(2)
}
@Test
fun testVeryLongDelay() = runBlockingTest {
expect(1)
delay(100) // move time forward a bit some that naive time + delay gives an overflow
launch {
delay(Long.MAX_VALUE / 2) // very long delay
finish(4)
}
launch {
delay(100) // short delay
expect(3)
}
expect(2)
}
@Test
fun testAdvanceUntilIdle_inRunBlocking() = runBlockingTest {
expect(1)
assertRunsFast {
advanceUntilIdle() // ensure this doesn't block forever
}
finish(2)
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-test/jvm/test/migration/TestRunBlockingOrderTest.kt |
'''
Setup script for M-LOOP using setuptools. See the documentation of setuptools for further details.
'''
from __future__ import absolute_import, division, print_function
import multiprocessing as mp
import mloop as ml
from setuptools import setup, find_packages
from os import path
def main():
long_description = ''
here = path.abspath(path.dirname(__file__))
description_path = path.join(here, 'DESCRIPTION.rst')
if path.exists(description_path):
with open(description_path, 'rb') as stream:
long_description = stream.read().decode('utf8')
setup(
name = 'M-LOOP',
version = ml.__version__,
packages = find_packages(),
entry_points={
'console_scripts': [
'M-LOOP = mloop.cmd:run_mloop'
],
},
setup_requires=['pytest-runner'],
install_requires = ['pip>=7.0',
'docutils>=0.3',
'numpy>=1.11',
'scipy>=0.17',
'matplotlib>=1.5',
'pytest>=2.9',
'scikit-learn>=0.18',
'tensorflow>=2.0.0'],
tests_require=['pytest','setuptools>=26'],
package_data = {
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt','*.md'],
},
author = 'Michael R Hush',
author_email = 'MichaelRHush@gmail.com',
description = 'M-LOOP: Machine-learning online optimization package. A python package of automated optimization tools - enhanced with machine-learning - for quantum scientific experiments, computer controlled systems or other optimization tasks.',
long_description = long_description,
license = 'MIT',
keywords = 'automated machine learning optimization optimisation science experiment quantum',
url = 'https://github.com/michaelhush/M-LOOP/',
download_url = 'https://github.com/michaelhush/M-LOOP/tarball/3.2.1',
classifiers = ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Manufacturing',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Physics']
)
if __name__=='__main__':
mp.freeze_support()
main() | unknown | codeparrot/codeparrot-clean | ||
import sys
from optparse import OptionParser
import os
import re
usage = "usage: %prog [options] file"
version = "2.1.0"
version_text = "%prog {}".format(version)
opt = OptionParser(usage = usage, version = version_text)
opt.add_option ("-l","--language"
,action = "store"
,dest = "language", default = 0
,help = "manualy select the language")
opt.add_option ("-s","--show"
,action = "store_true"
,dest = "show", default = False
,help = "show the current version of the file")
opt.add_option ("","--major"
,action = "store_true"
,dest = "major", default = False
,help = "upgrade major version")
opt.add_option ("","--minor"
,action = "store_true"
,dest = "minor", default = False
,help = "upgrade minor version")
opt.add_option ("","--build"
,action = "store_true"
,dest = "build", default = False
,help = "upgrade build version")
(options, args) = opt.parse_args()
class Language:
Unknown, Python, Haskell, Cpp = range(0,4)
@staticmethod
def languages():
l = []
for a in dir(Language):
if not "__" in a and not a in ["parse", "languages", "Unknown"]:
l.append(a)
return ", ".join(l)
@staticmethod
def parse( text ):
text = text.lower()
d = {
"python" : Language.Python,
"haskell" : Language.Haskell,
"cpp" : Language.Cpp,
}
if text in d:
return d[text]
for k,v in d.iteritems():
if text in k:
return v
return Language.Unknown
try:
options.file_path = args[0]
except:
sys.stderr.write("No input file!")
exit(2)
if not os.path.isfile(options.file_path):
sys.stderr.write("{} not exists!".format(options.file_path))
exit(3)
if options.language:
lan = Language.parse(options.language)
if lan == Language.Unknown:
sys.stderr.write("Incorrect language, available languages: {}".format(Language.languages()))
exit(1)
options.language = lan
else:
_, ext = os.path.splitext(options.file_path)
exts = {
".py" : Language.Python,
".cabal" : Language.Haskell,
".hpp" : Language.Cpp,
".cpp" : Language.Cpp,
}
options.language = exts.get(ext, Language.Unknown)
if options.language == Language.Unknown:
sys.stderr.write("Unknown language, cannot parse the file")
exit(4)
program_version_re = {
Language.Python : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+)\""),
Language.Cpp : re.compile("string\s+version\s*=\s*\"(\d+)\.(\d+)\.(\d+)\""),
Language.Haskell : re.compile("version\s*:\s*(\d+)\.(\d+)\.(\d+)"),
}
program_version_update = {
Language.Python : "version = \"{}.{}.{}\"",
Language.Cpp : "string version = \"{}.{}.{}\"",
Language.Haskell : "version: {}.{}.{}",
}
def get_version(options):
program_re = program_version_re[options.language]
with open(options.file_path,"r") as f:
lines = f.readlines()
for line in lines:
m = program_re.match(line)
if m:
return (m.group(0), int(m.group(1)),int(m.group(2)),int(m.group(3)))
return None
current_version = get_version(options)
if options.major:
t,m,_,_ = current_version
current_version = (t, m + 1, 0, 0)
if options.minor:
t,m,n,_ = current_version
current_version = (t, m , n + 1, 0)
if options.build:
t,m,n,b = current_version
current_version = (t, m , n, b + 1)
if options.show:
print (current_version[0])
print ("{}.{}.{}".format(current_version[1],current_version[2],current_version[3]))
exit(0)
orig, major, minor, build = current_version
updated = program_version_update[options.language].format(major, minor, build)
text = None
with open(options.file_path,"r") as f:
text = f.read()
text = text.replace(orig, updated)
print (text) | unknown | codeparrot/codeparrot-clean | ||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-related utilities for supporting libvirt connection code."""
import os
import jinja2
import netaddr
from oslo.config import cfg
from nova.network import model
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
def get_net_and_mask(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
def _get_first_network(network, version):
# Using a generator expression with a next() call for the first element
# of a list since we don't want to evaluate the whole list as we can
# have a lot of subnets
try:
return (i for i in network['subnets']
if i['version'] == version).next()
except StopIteration:
pass
def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
template=CONF.injected_network_template):
"""Returns a rendered network template for the given network_info.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param use_ipv6: If False, do not return IPv6 template information
even if an IPv6 subnet is present in network_info.
:param template: Path to the interfaces template file.
"""
if not (network_info and template):
return
nets = []
ifc_num = -1
ipv6_is_available = False
for vif in network_info:
if not vif['network'] or not vif['network']['subnets']:
continue
network = vif['network']
# NOTE(bnemec): The template only supports a single subnet per
# interface and I'm not sure how/if that can be fixed, so this
# code only takes the first subnet of the appropriate type.
subnet_v4 = _get_first_network(network, 4)
subnet_v6 = _get_first_network(network, 6)
ifc_num += 1
if not network.get_meta('injected'):
continue
address = None
netmask = None
gateway = ''
broadcast = None
dns = None
if subnet_v4:
if subnet_v4.get_meta('dhcp_server') is not None:
continue
if subnet_v4['ips']:
ip = subnet_v4['ips'][0]
address = ip['address']
netmask = model.get_netmask(ip, subnet_v4)
if subnet_v4['gateway']:
gateway = subnet_v4['gateway']['address']
broadcast = str(subnet_v4.as_netaddr().broadcast)
dns = ' '.join([i['address'] for i in subnet_v4['dns']])
address_v6 = None
gateway_v6 = ''
netmask_v6 = None
have_ipv6 = (use_ipv6 and subnet_v6)
if have_ipv6:
if subnet_v6.get_meta('dhcp_server') is not None:
continue
if subnet_v6['ips']:
ipv6_is_available = True
ip_v6 = subnet_v6['ips'][0]
address_v6 = ip_v6['address']
netmask_v6 = model.get_netmask(ip_v6, subnet_v6)
if subnet_v6['gateway']:
gateway_v6 = subnet_v6['gateway']['address']
net_info = {'name': 'eth%d' % ifc_num,
'address': address,
'netmask': netmask,
'gateway': gateway,
'broadcast': broadcast,
'dns': dns,
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
nets.append(net_info)
if not nets:
return
return build_template(template, nets, ipv6_is_available)
def build_template(template, nets, ipv6_is_available):
tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': nets,
'use_ipv6': ipv6_is_available}) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.boottestrun.nomain;
/**
* Application used for testing {@code bootTestRun}'s handling of no test main method
*
* @author Andy Wilkinson
*/
public class BootTestRunNoMain {
} | java | github | https://github.com/spring-projects/spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/resources/com/example/boottestrun/nomain/BootTestRunNoMain.java |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide merge capabilities for events.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib import Person, Family
from ..db import DbTxn
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..errors import MergeError
#-------------------------------------------------------------------------
#
# MergeEventQuery
#
#-------------------------------------------------------------------------
class MergeEventQuery:
"""
Create database query to merge two events.
"""
def __init__(self, dbstate, phoenix, titanic):
self.database = dbstate.db
self.phoenix = phoenix
self.titanic = titanic
def execute(self):
"""
Merges two events into a single event.
"""
new_handle = self.phoenix.get_handle()
old_handle = self.titanic.get_handle()
self.phoenix.merge(self.titanic)
with DbTxn(_("Merge Event Objects"), self.database) as trans:
self.database.commit_event(self.phoenix, trans)
for (class_name, handle) in self.database.find_backlink_handles(
old_handle):
if class_name == Person.__name__:
person = self.database.get_person_from_handle(handle)
assert(person.has_handle_reference("Event", old_handle))
bri = person.birth_ref_index
dri = person.death_ref_index
person.replace_handle_reference("Event", old_handle,
new_handle)
if person.birth_ref_index != bri and \
person.birth_ref_index == -1:
for index, ref in enumerate(person.get_event_ref_list()):
event = self.database.get_event_from_handle(ref.ref)
if event.type.is_birth() and ref.role.is_primary():
person.birth_ref_index = index
break
if person.death_ref_index != dri and \
person.death_ref_index == -1:
for index, ref in enumerate(person.get_event_ref_list()):
event = self.database.get_event_from_handle(ref.ref)
if event.type.is_death() and ref.role.is_primary():
person.death_ref_index = index
break
self.database.commit_person(person, trans)
elif class_name == Family.__name__:
family = self.database.get_family_from_handle(handle)
assert(family.has_handle_reference("Event", old_handle))
family.replace_handle_reference("Event", old_handle,
new_handle)
self.database.commit_family(family, trans)
else:
raise MergeError("Encounter an object of type %s that has "
"an event reference." % class_name)
self.database.remove_event(old_handle, trans) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory import Inventory
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
    """Unit tests for Inventory host-pattern parsing.

    Exercises ``_split_pattern`` (splitting colon/comma separated host
    patterns, including IPv6 addresses) and ``_split_subscript`` /
    ``_apply_subscript`` (the ``name[i]`` / ``name[i:j]`` slice syntax).
    """
    # Mapping of raw pattern string -> expected list of split patterns.
    # Note the IPv6 cases: colons inside an address must NOT be treated
    # as pattern separators.
    patterns = {
        'a': ['a'],
        'a, b': ['a', 'b'],
        'a , b': ['a', 'b'],
        ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
        '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo'],
        'foo[1:2]': ['foo[1:2]'],
        'a::b': ['a::b'],
        'a:b': ['a', 'b'],
        ' a : b ': ['a', 'b'],
        'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
    }
    # Same idea, but the input is already a list of pattern strings;
    # each member may itself contain comma-separated patterns.
    pattern_lists = [
        [['a'], ['a']],
        [['a', 'b'], ['a', 'b']],
        [['a, b'], ['a', 'b']],
        [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
         ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo']]
    ]
    # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
    # a,b are the bounds of the subscript; x..z are the results of the subscript
    # when applied to string.ascii_letters.
    subscripts = {
        'a': [('a',None), list(string.ascii_letters)],
        'a[0]': [('a', (0, None)), ['a']],
        'a[1]': [('a', (1, None)), ['b']],
        'a[2:3]': [('a', (2, 3)), ['c', 'd']],
        'a[-1]': [('a', (-1, None)), ['Z']],
        'a[-2]': [('a', (-2, None)), ['Y']],
        'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
        'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
        'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
    }
    def setUp(self):
        # Build an Inventory with no hosts; only the parsing helpers
        # are exercised, so an empty host list is sufficient.
        v = VariableManager()
        fake_loader = DictDataLoader({})
        self.i = Inventory(loader=fake_loader, variable_manager=v, host_list='')
    def test_split_patterns(self):
        # String inputs.
        for p in self.patterns:
            r = self.patterns[p]
            self.assertEqual(r, self.i._split_pattern(p))
        # List inputs.
        for p, r in self.pattern_lists:
            self.assertEqual(r, self.i._split_pattern(p))
    def test_ranges(self):
        for s in self.subscripts:
            r = self.subscripts[s]
            # The subscript must be parsed into (base, bounds)...
            self.assertEqual(r[0], self.i._split_subscript(s))
            # ...and applying those bounds must select the expected slice.
            self.assertEqual(
                r[1],
                self.i._apply_subscript(
                    list(string.ascii_letters),
                    r[0][1]
                )
            )
from wtforms import TextField
from wtforms import IntegerField as _IntegerField
from wtforms import DecimalField as _DecimalField
from wtforms import DateField as _DateField
from wtforms.widgets import Input
class DateInput(Input):
    """
    Renders an HTML5 `<input type=date>` widget.
    """
    input_type = "date"
class NumberInput(Input):
    """
    Renders an HTML5 `<input type=number>` widget.
    """
    input_type = "number"
class RangeInput(Input):
    """
    Renders an HTML5 `<input type=range>` (slider) widget.
    """
    input_type = "range"
class URLInput(Input):
    """
    Renders an HTML5 `<input type=url>` widget.
    """
    input_type = "url"
class EmailInput(Input):
    """
    Renders an HTML5 `<input type=email>` widget.
    """
    input_type = "email"
class SearchInput(Input):
    """
    Renders an HTML5 `<input type=search>` widget.
    """
    input_type = "search"
class TelInput(Input):
    """
    Renders an HTML5 `<input type=tel>` (telephone number) widget.
    """
    input_type = "tel"
class SearchField(TextField):
    """
    **TextField** rendered with the HTML5 **SearchInput** widget by default.
    """
    widget = SearchInput()
class DateField(_DateField):
    """
    wtforms **DateField** rendered with the HTML5 **DateInput** widget by default.
    """
    widget = DateInput()
class URLField(TextField):
    """
    **TextField** rendered with the HTML5 **URLInput** widget by default.
    """
    widget = URLInput()
class EmailField(TextField):
    """
    **TextField** rendered with the HTML5 **EmailInput** widget by default.
    """
    widget = EmailInput()
class TelField(TextField):
    """
    **TextField** rendered with the HTML5 **TelInput** widget by default.
    """
    widget = TelInput()
class IntegerField(_IntegerField):
    """
    wtforms **IntegerField** rendered with the HTML5 **NumberInput** widget by default.
    """
    widget = NumberInput()
class DecimalField(_DecimalField):
    """
    wtforms **DecimalField** rendered with the HTML5 **NumberInput** widget by default.
    """
    widget = NumberInput()
class IntegerRangeField(_IntegerField):
    """
    wtforms **IntegerField** rendered with the HTML5 **RangeInput** (slider) widget by default.
    """
    widget = RangeInput()
class DecimalRangeField(_DecimalField):
    """
    wtforms **DecimalField** rendered with the HTML5 **RangeInput** (slider) widget by default.
    """
    widget = RangeInput()
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpModule(TestNxosModule):
    """Unit tests for the nxos_bgp module against a 16-bit ASN fixture."""
    module = nxos_bgp
    def setUp(self):
        super(TestNxosBgpModule, self).setUp()
        # Patch config I/O so no device connection is needed; the started
        # mocks are stopped in tearDown.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgpModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Serve the canned running-config; pushing config is a no-op.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = []
    def test_nxos_bgp(self):
        # Changing the router-id should emit exactly these commands.
        set_module_args(dict(asn=65535, router_id='192.0.2.1'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['router bgp 65535', 'router-id 192.0.2.1'])
    def test_nxos_bgp_change_nothing(self):
        # Values already present in the fixture -> no change reported.
        set_module_args(dict(asn=65535, router_id='192.168.1.1'))
        self.execute_module(changed=False)
    def test_nxos_bgp_wrong_asn(self):
        # Only one BGP process may exist; a different ASN must fail.
        set_module_args(dict(asn=10, router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')
    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn=65535, state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535'])
    def test_nxos_bgp_remove_vrf(self):
        set_module_args(dict(asn=65535, vrf='test2', state='absent'))
        self.execute_module(changed=True, commands=['router bgp 65535', 'no vrf test2'])
    def test_nxos_bgp_remove_nonexistant_vrf(self):
        # Removing a VRF that is not configured is a no-op, not an error.
        set_module_args(dict(asn=65535, vrf='foo', state='absent'))
        self.execute_module(changed=False)
    def test_nxos_bgp_remove_wrong_asn(self):
        set_module_args(dict(asn=10, state='absent'))
        self.execute_module(changed=False)
    def test_nxos_bgp_vrf(self):
        # Configuring under an unknown VRF succeeds but warns.
        set_module_args(dict(asn=65535, vrf='test', router_id='192.0.2.1'))
        result = self.execute_module(changed=True, commands=['router bgp 65535', 'vrf test', 'router-id 192.0.2.1'])
        self.assertEqual(result['warnings'], ["VRF test doesn't exist."])
    def test_nxos_bgp_global_param(self):
        set_module_args(dict(asn=65535, shutdown=True))
        self.execute_module(changed=True, commands=['router bgp 65535', 'shutdown'])
    def test_nxos_bgp_global_param_outside_default(self):
        # Global-only parameters are rejected for non-default VRFs.
        set_module_args(dict(asn=65535, vrf='test', shutdown=True))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Global params can be modified only under "default" VRF.')
    def test_nxos_bgp_default_value(self):
        # 'default' resolves to the platform default (120 seconds here).
        set_module_args(dict(asn=65535, graceful_restart_timers_restart='default'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'graceful-restart restart-time 120']
        )
class TestNxosBgp32BitsAS(TestNxosModule):
    """Unit tests for nxos_bgp with a 32-bit (asdot "X.Y") ASN fixture."""
    module = nxos_bgp
    def setUp(self):
        super(TestNxosBgp32BitsAS, self).setUp()
        # Patch config I/O so no device connection is needed.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgp32BitsAS, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Fixture already contains 'router bgp 65535.65535'.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config_32_bits_as.cfg')
        self.load_config.return_value = []
    def test_nxos_bgp_change_nothing(self):
        set_module_args(dict(asn='65535.65535', router_id='192.168.1.1'))
        self.execute_module(changed=False)
    def test_nxos_bgp_wrong_asn(self):
        # A different asdot ASN must be rejected.
        set_module_args(dict(asn='65535.10', router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')
    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn='65535.65535', state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535.65535'])
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils import importlib
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateResponseMixin
from socialregistration import signals
from socialregistration.settings import SESSION_KEY
import urlparse
ERROR_VIEW = getattr(settings, 'SOCIALREGISTRATION_ERROR_VIEW_FUNCTION',
None)
class CommonMixin(TemplateResponseMixin):
    """
    Provides default functionality used such as authenticating and signing
    in users, redirecting etc.
    """
    def import_attribute(self, path):
        """
        Import an attribute from a module, given its dotted ``path``
        (e.g. ``"pkg.mod.func"`` returns ``func`` from ``pkg.mod``).
        """
        module = '.'.join(path.split('.')[:-1])
        function = path.split('.')[-1]
        module = importlib.import_module(module)
        return getattr(module, function)
    def get_next(self, request):
        """
        Returns a url to redirect to after the login / signup.

        Lookup order: session, then GET, then POST, then the
        ``LOGIN_REDIRECT_URL`` setting (default ``'/'``).
        """
        if 'next' in request.session:
            next = request.session['next']
            # Consume the session value so it is only used once.
            del request.session['next']
        elif 'next' in request.GET:
            next = request.GET.get('next')
        elif 'next' in request.POST:
            next = request.POST.get('next')
        else:
            next = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        # Open-redirect guard: if the target points at a different host,
        # fall back to the configured login redirect.
        netloc = urlparse.urlparse(next)[1]
        if netloc and netloc != request.get_host():
            next = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        return next
    def authenticate(self, **kwargs):
        """
        Authenticate a user against all configured authentication backends.
        Returns a user object or ``None``.
        """
        return authenticate(**kwargs)
    def login(self, request, user):
        """
        Sign a user in (attach the user to the current session).
        """
        return login(request, user)
    def inactive_response(self, request):
        """
        Response for users whose account is inactive: redirect to
        ``LOGIN_INACTIVE_REDIRECT_URL`` when configured, otherwise render
        an error message.
        """
        inactive_url = getattr(settings, 'LOGIN_INACTIVE_REDIRECT_URL', '')
        if inactive_url:
            return HttpResponseRedirect(inactive_url)
        else:
            return self.error_to_response(request, {'error': _("This user account is marked as inactive.")})
    def redirect(self, request):
        """
        Redirect the user back to the ``next`` session/request variable.
        """
        return HttpResponseRedirect(self.get_next(request))
class ClientMixin(object):
    """
    Interface for views that need an API client class (such as
    ``OAuthRedirectView``); subclasses assign the class to ``client``.
    """
    # The client class we'll be working with; must be set by subclasses.
    client = None
    def get_client(self):
        """
        Return the configured client class, raising ``AttributeError``
        when no client has been assigned.
        """
        client = self.client
        if client is not None:
            return client
        raise AttributeError('`self.client` is `None`')
class ProfileMixin(object):
    """
    Views such as ``SetupCallback`` require a profile model to work with. This is
    the interface to it.
    """
    #: The profile model that we'll be working with; set by subclasses.
    profile = None
    def get_lookup_kwargs(self, request, client):
        """
        Return a dictionary to look up a profile object.
        Must be implemented by subclasses.
        """
        raise NotImplementedError
    def get_model(self):
        """
        Return the profile model or raise an ``AttributeError``
        if ``self.profile`` is not set.
        """
        if self.profile is None:
            raise AttributeError('`self.profile` is `None`')
        return self.profile
    def create_user(self):
        """
        Create and return an empty (unsaved) user model instance.
        """
        return User()
    def create_profile(self, user, save=False, **kwargs):
        """
        Create a profile model.
        :param user: A user object
        :param save: If this is set, the profile will
            be saved to DB straight away
        :type save: bool
        """
        profile = self.get_model()(user=user, **kwargs)
        if save:
            profile.save()
        return profile
    def get_profile(self, **kwargs):
        """
        Return a profile object matching ``kwargs``.
        Raises the model's ``DoesNotExist`` if none matches.
        """
        return self.get_model().objects.get(**kwargs)
    def get_or_create_profile(self, user, save=False, **kwargs):
        """
        Return a profile from DB or if there is none, create a new one.
        Returns a ``(profile, created)`` tuple, where ``created`` tells
        whether a new profile was instantiated.
        :param user: A user object
        :param save: If set, a new profile will be saved.
        :type save: bool
        """
        try:
            profile = self.get_model().objects.get(user=user, **kwargs)
            return profile, False
        except self.get_model().DoesNotExist:
            # No existing profile: create one (persisted only if save=True).
            profile = self.create_profile(user, save=save, **kwargs)
            return profile, True
class SessionMixin(object):
    """
    Carries the pending user, profile and API client across the two-step
    signup flow by stashing them in the session. Provides helpers to
    store, retrieve and clear those values.
    """
    def _session_key(self, name):
        # Build the namespaced session key for ``name``.
        return '%s%s' % (SESSION_KEY, name)
    def store_profile(self, request, profile):
        """
        Store the profile data to the session
        """
        request.session[self._session_key('profile')] = profile
    def store_user(self, request, user):
        """
        Store the user data to the session
        """
        request.session[self._session_key('user')] = user
    def store_client(self, request, client):
        """
        Store the client to the session
        """
        request.session[self._session_key('client')] = client
    def get_session_data(self, request):
        """
        Return a tuple ``(user, profile, client)`` from the session.
        """
        session = request.session
        return (session[self._session_key('user')],
                session[self._session_key('profile')],
                session[self._session_key('client')])
    def delete_session_data(self, request):
        """
        Clear all session data.
        """
        for name in ('user', 'profile', 'client'):
            try:
                del request.session[self._session_key(name)]
            except KeyError:
                pass
class SignalMixin(object):
    """
    When signing users up or signing users in we need to send out signals to
    notify other parts of the code. This mixin provides an interface for sending
    the signals.
    """
    def send_login_signal(self, request, user, profile, client):
        """
        Send a signal that a user logged in. This signal should be sent only if
        the user was *not* logged into Django.
        """
        # sender is the profile class so receivers can filter per backend.
        signals.login.send(sender=profile.__class__, user=user,
            profile=profile, client=client, request=request)
    def send_connect_signal(self, request, user, profile, client):
        """
        Send a signal that a user connected a social profile to his Django
        account. This signal should be sent *only* when a new social
        connection was created.
        """
        signals.connect.send(sender=profile.__class__, user=user, profile=profile,
            client=client, request=request)
class ErrorMixin(object):
    """
    Renders error dictionaries, delegating to the project-configured
    ``SOCIALREGISTRATION_ERROR_VIEW_FUNCTION`` when one is set.
    """
    def error_to_response(self, request, error_dict, **context):
        # Without a configured error view, render the errors directly.
        if not ERROR_VIEW:
            return self.render_to_response(error_dict, **context)
        error_view = self.import_attribute(ERROR_VIEW)
        return error_view(request, error_dict, **context)
class SocialRegistration(CommonMixin, ClientMixin, ProfileMixin, SessionMixin,
    SignalMixin, ErrorMixin):
    """
    Convenience base class combining every mixin in this module, giving
    concrete views the complete auth / profile / session / signal tool-kit.
    """
    pass
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring",
"iselement", "iterparse",
"parse",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring",
"TreeBuilder",
"VERSION", "XML",
"XMLTreeBuilder",
]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} or {@link
# #SubElement} factory functions.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import string, sys, re
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
# Prefer the real ElementPath module when it is installed; otherwise
# fall back on the limited tag-matching emulation above.
try:
    import ElementPath
except ImportError:
    # FIXME: issue warning in this case?
    ElementPath = _SimpleElementPath()
# TODO: add support for custom namespace resolvers/default namespaces
# TODO: add improved support for incremental parsing
VERSION = "1.2.6"
##
# Internal element class. This class defines the Element interface,
# and provides a reference implementation of this interface.
# <p>
# You should not create instances of this class directly. Use the
# appropriate factory functions instead, such as {@link #Element}
# and {@link #SubElement}.
#
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class _ElementInterface:
    # <tag attrib>text<child/>...</tag>tail
    #
    # Reference implementation of the Element interface: a tag string,
    # an attribute dictionary, optional text/tail strings, and an
    # ordered list of child elements.
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary. Where possible, use
    # {@link #_ElementInterface.get},
    # {@link #_ElementInterface.set},
    # {@link #_ElementInterface.keys}, and
    # {@link #_ElementInterface.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement. This is either a
    # string or the value None, if there was no text.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag. This is either a string or
    # the value None, if there was no text.
    tail = None # text after end tag, if any
    def __init__(self, tag, attrib):
        # The attrib dictionary is stored as-is (not copied); the
        # factory functions below always pass in a fresh copy.
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at %x>" % (self.tag, id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        return Element(tag, attrib)
    ##
    # Returns the number of subelements.
    #
    # @return The number of subelements.
    def __len__(self):
        return len(self._children)
    ##
    # Returns the given subelement.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        return self._children[index]
    ##
    # Replaces the given subelement.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    # @exception AssertionError If element is not a valid object.
    def __setitem__(self, index, element):
        assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        del self._children[index]
    ##
    # Returns a list containing subelements in the given range.
    #
    # @param start The first subelement to return.
    # @param stop The first subelement that shouldn't be returned.
    # @return A sequence object containing subelements.
    def __getslice__(self, start, stop):
        return self._children[start:stop]
    ##
    # Replaces a number of subelements with elements from a sequence.
    #
    # @param start The first subelement to replace.
    # @param stop The first subelement that shouldn't be replaced.
    # @param elements A sequence object with zero or more elements.
    # @exception AssertionError If a sequence member is not a valid object.
    def __setslice__(self, start, stop, elements):
        for element in elements:
            assert iselement(element)
        # list() copies, so a caller-supplied sequence is never shared
        # with the internal child list.
        self._children[start:stop] = list(elements)
    ##
    # Deletes a number of subelements.
    #
    # @param start The first subelement to delete.
    # @param stop The first subelement to leave in there.
    def __delslice__(self, start, stop):
        del self._children[start:stop]
    ##
    # Adds a subelement to the end of this element.
    #
    # @param element The element to add.
    # @exception AssertionError If a sequence member is not a valid object.
    def append(self, element):
        assert iselement(element)
        self._children.append(element)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    # @exception AssertionError If the element is not a valid object.
    def insert(self, index, element):
        assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement. Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    # @exception AssertionError If the element is not a valid object.
    def remove(self, element):
        assert iselement(element)
        self._children.remove(element)
    ##
    # Returns all subelements. The elements are returned in document
    # order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path):
        return ElementPath.find(self, path)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #    default value if no element was found. Note that if the element
    #    is found, but has no text content, this method returns an
    #    empty string.
    # @defreturn string
    def findtext(self, path, default=None):
        return ElementPath.findtext(self, path, default)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path):
        return ElementPath.findall(self, path)
    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the text and tail attributes to None.
    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        self.attrib[key] = value
    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        return self.attrib.items()
    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, the result
    # is undefined.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return A list or iterator containing all the matching elements.
    # @defreturn list or iterator
    def getiterator(self, tag=None):
        nodes = []
        # "*" is treated the same as "match everything".
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            nodes.append(self)
        # Recursively collect matches from the whole subtree, preserving
        # document order.
        for node in self._children:
            nodes.extend(node.getiterator(tag))
        return nodes
# compatibility
# (older code may reference the private _Element name directly)
_Element = _ElementInterface
##
# Element factory. This function returns an object implementing the
# standard Element interface. The exact class or type of that object
# is implementation dependent, but it will always be compatible with
# the {@link #_ElementInterface} class in this module.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def Element(tag, attrib={}, **extra):
    # Work on a copy so that neither the caller's mapping nor the
    # shared default dictionary is ever mutated; keyword attributes
    # override entries from the positional dictionary.
    attributes = attrib.copy()
    attributes.update(extra)
    return _ElementInterface(tag, attributes)
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    # Copy-and-merge the attributes, then delegate element creation to
    # the parent's makeelement so subclassed trees can supply their own
    # element type.  The new child is appended before being returned.
    attributes = attrib.copy()
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    # Comment elements use the Comment function object itself as their
    # tag marker; the comment body is carried in the text attribute.
    node = Element(Comment)
    node.text = text
    return node
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    # PI elements use the ProcessingInstruction function object itself
    # as their tag marker; target (and optional contents) are stored in
    # the text attribute, separated by a single space.
    node = Element(ProcessingInstruction)
    if text:
        node.text = target + " " + text
    else:
        node.text = target
    return node
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
    # Opaque wrapper for a universal name.  When *tag* is given, the
    # first argument is treated as a namespace URI and combined into
    # the "{uri}local" form; otherwise it is stored verbatim.
    def __init__(self, text_or_uri, tag=None):
        if not tag:
            self.text = text_or_uri
        else:
            self.text = "{%s}%s" % (text_or_uri, tag)
    def __str__(self):
        return self.text
    def __hash__(self):
        # Hash like the underlying string so QNames and plain strings
        # can be used interchangeably as dictionary keys.
        return hash(self.text)
    def __cmp__(self, other):
        # Compare by text, against either another QName or a string.
        if isinstance(other, QName):
            other = other.text
        return cmp(self.text, other)
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
    def __init__(self, element=None, file=None):
        # The root must be a real element (or None); fail fast otherwise.
        assert element is None or iselement(element)
        self._root = element # first node
        if file:
            # Initialise the tree from an XML file name or file object.
            self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
    def getroot(self):
        # May be None for a tree created empty and never parsed.
        return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
    def _setroot(self, element):
        # Discards the current contents of the tree; use with care.
        assert iselement(element)
        self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return The document root element.
# @defreturn Element
def parse(self, source, parser=None):
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLTreeBuilder()
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def getiterator(self, tag=None):
assert self._root is not None
return self._root.getiterator(tag)
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.find(path)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# has is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.findtext(path, default)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.findall(path)
##
# Writes the element tree to a file, as XML.
#
# @param file A file name, or a file object opened for writing.
# @param encoding Optional output encoding (default is US-ASCII).
def write(self, file, encoding="us-ascii"):
assert self._root is not None
if not hasattr(file, "write"):
file = open(file, "wb")
if not encoding:
encoding = "us-ascii"
elif encoding != "utf-8" and encoding != "us-ascii":
file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
self._write(file, self._root, encoding, {})
def _write(self, file, node, encoding, namespaces):
# write XML to file
tag = node.tag
if tag is Comment:
file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
elif tag is ProcessingInstruction:
file.write("<?%s?>" % _escape_cdata(node.text, encoding))
else:
items = node.items()
xmlns_items = [] # new namespaces in this scope
try:
if isinstance(tag, QName) or tag[:1] == "{":
tag, xmlns = fixtag(tag, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(tag)
file.write("<" + _encode(tag, encoding))
if items or xmlns_items:
items.sort() # lexical order
for k, v in items:
try:
if isinstance(k, QName) or k[:1] == "{":
k, xmlns = fixtag(k, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(k)
try:
if isinstance(v, QName):
v, xmlns = fixtag(v, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(v)
file.write(" %s=\"%s\"" % (_encode(k, encoding),
_escape_attrib(v, encoding)))
for k, v in xmlns_items:
file.write(" %s=\"%s\"" % (_encode(k, encoding),
_escape_attrib(v, encoding)))
if node.text or len(node):
file.write(">")
if node.text:
file.write(_escape_cdata(node.text, encoding))
for n in node:
self._write(file, n, encoding, namespaces)
file.write("</" + _encode(tag, encoding) + ">")
else:
file.write(" />")
for k, v in xmlns_items:
del namespaces[v]
if node.tail:
file.write(_escape_cdata(node.tail, encoding))
# --------------------------------------------------------------------
# helpers
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    """Return a true value if *element* appears to be a valid element object."""
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, _ElementInterface):
        return True
    return hasattr(element, "tag")
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    """Write an element tree (or element) to sys.stdout, for debugging only."""
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout)
    # make sure the output ends with a newline
    tail = tree.getroot().tail
    if not (tail and tail[-1] == "\n"):
        sys.stdout.write("\n")
def _encode(s, encoding):
    """Best-effort encode: objects without an encode method pass through."""
    try:
        return s.encode(encoding)
    except AttributeError:
        # 1.5.2: assume the string already uses the right encoding
        return s
if sys.version[:3] == "1.5":
    _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
    _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
# Map reserved XML characters to their predefined entities.  The original
# values here had been HTML-unescaped into identity mappings (e.g.
# "&": "&"), which made escaping a no-op and broke the '"' entry's syntax;
# restored to the standard XML entities.
_escape_map = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
}
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
}
def _raise_serialization_error(text):
    """Signal that *text* has a type that cannot be serialized as XML."""
    message = "cannot serialize %r (type %s)" % (text, type(text).__name__)
    raise TypeError(message)
def _encode_entity(text, pattern=_escape):
    # map reserved and non-ascii characters to numerical entities
    def escape_entities(m, map=_escape_map):
        out = []
        append = out.append
        for char in m.group():
            text = map.get(char)
            if text is None:
                # not a predefined entity: emit a numeric character reference
                text = "&#%d;" % ord(char)
            append(text)
        return string.join(out, "")
    try:
        # the result contains only ASCII once every special char is escaped
        return _encode(pattern.sub(escape_entities, text), "ascii")
    except TypeError:
        _raise_serialization_error(text)
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def _escape_cdata(text, encoding=None, replace=string.replace):
    # escape character data
    # NOTE: the replacement strings had been HTML-unescaped into identity
    # mappings (e.g. replacing "&" with "&"), so no escaping happened at
    # all; restored to the standard XML entities.  "&" must be replaced
    # first so already-inserted entities are not double-escaped.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # fall back to numeric character references
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding=None, replace=string.replace):
    # escape attribute value
    # NOTE: the replacement strings had been HTML-unescaped into identity
    # mappings, making this a no-op; restored to the standard XML entities.
    # "&" is replaced first to avoid double-escaping.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "'", "&apos;") # FIXME: overkill
        text = replace(text, "\"", "&quot;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def fixtag(tag, namespaces):
    # given a decorated tag (of the form {uri}tag), return prefixed
    # tag and namespace declaration, if any
    # *namespaces* maps uri -> prefix and is updated in place
    if isinstance(tag, QName):
        tag = tag.text
    namespace_uri, tag = string.split(tag[1:], "}", 1)
    prefix = namespaces.get(namespace_uri)
    if prefix is None:
        prefix = _namespace_map.get(namespace_uri)
        if prefix is None:
            # invent a prefix; the running count keeps it unique in this map
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        if prefix == "xml":
            # the xml prefix is predefined and never declared explicitly
            xmlns = None
        else:
            # new declaration needed: (attribute name, uri) pair
            xmlns = ("xmlns:%s" % prefix, namespace_uri)
    else:
        xmlns = None
    return "%s:%s" % (prefix, tag), xmlns
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    """Parse *source* (file name or file object) into a new ElementTree."""
    tree = ElementTree()
    tree.parse(source, parser)
    return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @return A (event, elem) iterator.
class iterparse:
    """Incrementally parse *source*, producing (event, elem) pairs."""
    def __init__(self, source, events=None):
        if not hasattr(source, "read"):
            source = open(source, "rb")
        self._file = source
        self._events = []
        self._index = 0
        self.root = self._root = None
        self._parser = XMLTreeBuilder()
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            # NOTE: event/append/start/end are bound as default arguments so
            # each handler captures the values from this loop iteration
            if event == "start":
                try:
                    # prefer expat's list-based attribute reporting
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = _encode(uri, "ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
    def next(self):
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # input exhausted: expose the root and stop iterating
                    self.root = self._root
                    try:
                        raise StopIteration
                    except NameError:
                        # very old interpreters have no StopIteration
                        raise IndexError
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item
    try:
        iter
        def __iter__(self):
            return self
    except NameError:
        # pre-iterator interpreters: emulate iteration via __getitem__
        def __getitem__(self, index):
            return self.next()
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
def XML(text):
    """Parse an XML document from a string constant; return the root Element."""
    builder = XMLTreeBuilder()
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text):
    """Parse XML from a string; return (root element, id -> element mapping)."""
    parser = XMLTreeBuilder()
    parser.feed(text)
    tree = parser.close()
    # index every element that carries an "id" attribute
    ids = {}
    for node in tree.getiterator():
        node_id = node.get("id")
        if node_id:
            ids[node_id] = node
    return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML # alias: parse a document from a string, same as XML()
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None):
    """Serialize *element* (with all subelements) and return the XML string."""
    class dummy:
        pass
    data = []
    file = dummy()
    # minimal file-like object: collect write() chunks, join them once
    file.write = data.append
    ElementTree(element).write(file, encoding)
    return string.join(data, "")
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
    """Converts start/data/end method calls into an element structure."""
    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = _ElementInterface
        self._factory = element_factory
    ##
    # Flushes the parser buffers, and returns the toplevel document
    # element.
    #
    # @return An Element instance.
    # @defreturn Element
    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        assert self._last != None, "missing toplevel element"
        return self._last
    def _flush(self):
        # attach buffered character data to the most recent element,
        # either as its text (inside the tag) or its tail (after it)
        if self._data:
            if self._last is not None:
                text = string.join(self._data, "")
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    ##
    # Adds text to the current element.
    #
    # @param data A string. This should be either an 8-bit string
    # containing ASCII text, or a Unicode string.
    def data(self, data):
        self._data.append(data)
    ##
    # Opens a new element.
    #
    # @param tag The element name.
    # @param attrib A dictionary containing element attributes.
    # @return The opened element.
    # @defreturn Element
    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            # append to the currently open element, if any
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
    ##
    # Closes the current element.
    #
    # @param tag The element name.
    # @return The closed element.
    # @defreturn Element
    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @see #ElementTree
# @see #TreeBuilder
class XMLTreeBuilder:
    """Expat-based XML parser that feeds a TreeBuilder (or custom target)."""
    def __init__(self, html=0, target=None):
        try:
            from xml.parsers import expat
        except ImportError:
            raise ImportError(
                "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" namespace separator makes expat report names as "uri}local"
        self._parser = parser = expat.ParserCreate(None, "}")
        if target is None:
            target = TreeBuilder()
        self._target = target
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        encoding = None
        if not parser.returns_unicode:
            encoding = "utf-8"
        # target.xml(encoding, None)
        self._doctype = None
        self.entity = {}
    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return _encode(text, "ascii")
        except UnicodeError:
            return text
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat gave us "uri}local"; add the leading brace so the
                # name takes the universal "{uri}local" form
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name
    def _start(self, tag, attrib_in):
        # start-tag callback (dict-based attribute reporting)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = self._fixtext(value)
        return self._target.start(tag, attrib)
    def _start_list(self, tag, attrib_in):
        # start-tag callback (flat [name, value, ...] attribute reporting)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
        return self._target.start(tag, attrib)
    def _data(self, text):
        return self._target.data(self._fixtext(text))
    def _end(self, tag):
        return self._target.end(self._fixname(tag))
    def _default(self, text):
        # catch-all callback: handles entity references and doctype parsing
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self._target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                raise expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = string.strip(text)
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quote characters
                    pubid = pubid[1:-1]
                self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    ##
    # Handles a doctype declaration.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.
    def doctype(self, name, pubid, system):
        # hook for subclasses; default does nothing
        pass
    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.
    def feed(self, data):
        self._parser.Parse(data, 0)
    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element
    def close(self):
        self._parser.Parse("", 1) # end of data
        tree = self._target.close()
        del self._target, self._parser # get rid of circular references
        return tree
#-*- coding: utf-8 -*-
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ObjectDoesNotExist
from shop.models import AddressModel
#===============================================================================
# Addresses handling
#===============================================================================
def get_shipping_address_from_request(request):
    """
    Get the shipping address from the request. This abstracts the fact that
    users can be either registered (and thus, logged in), or only
    session-based guests.

    Returns the AddressModel instance, or None when no address is known yet.
    """
    shipping_address = None
    if request.user and not isinstance(request.user, AnonymousUser):
        # There is a logged-in user here, but he might not have an address defined.
        try:
            shipping_address = AddressModel.objects.get(user_shipping=request.user)
        except AddressModel.DoesNotExist:
            shipping_address = None
    else:
        # The client is a guest - let's use the session instead.
        session = getattr(request, 'session', None)
        if session is not None:
            # BUGFIX: previously session.get() was called before the None
            # check, raising AttributeError for requests without a session.
            session_address_id = session.get('shipping_address_id')
            if session_address_id:
                try:
                    shipping_address = AddressModel.objects.get(pk=session_address_id)
                except AddressModel.DoesNotExist:
                    # stale session reference (address was deleted)
                    shipping_address = None
    return shipping_address
def get_billing_address_from_request(request):
    """
    Get the billing address from the request. This abstracts the fact that
    users can be either registered (and thus, logged in), or only
    session-based guests.

    Returns the AddressModel instance, or None when no address is known yet.
    """
    billing_address = None
    if request.user and not isinstance(request.user, AnonymousUser):
        # There is a logged-in user here, but he might not have an address defined.
        try:
            billing_address = AddressModel.objects.get(user_billing=request.user)
        except AddressModel.DoesNotExist:
            billing_address = None
    else:
        # The client is a guest - let's use the session instead.
        session = getattr(request, 'session', None)
        if session is not None:
            # BUGFIX: previously session.get() was called before the None
            # check, raising AttributeError for requests without a session.
            session_billing_id = session.get('billing_address_id')
            if session_billing_id:
                try:
                    billing_address = AddressModel.objects.get(pk=session_billing_id)
                except AddressModel.DoesNotExist:
                    # stale session reference (address was deleted)
                    billing_address = None
    return billing_address
def assign_address_to_request(request, address, shipping=True):
    """
    Sets the passed address as either the shipping or the billing address for
    the passed request.
    This abstracts the difference between logged-in users and session-based
    guests.
    The `shipping` parameter controls whether the address is a shipping
    address (default) or a billing address.
    """
    user = request.user
    if user and not isinstance(user, AnonymousUser):
        # Logged-in user: persist the relation on the address record.
        if shipping:
            address.user_shipping = user
        else:
            address.user_billing = user
        address.save()
    else:
        # Guest: remember the address primary key in the session.
        # There has to be a session. Otherwise it's fine to get an AttributeError
        key = 'shipping_address_id' if shipping else 'billing_address_id'
        request.session[key] = address.pk
def get_user_name_from_request(request):
    """
    Simple helper to return the username from the request, or '' if the user is
    AnonymousUser.
    """
    user = request.user
    if not user or isinstance(user, AnonymousUser):
        return ''
    return user.get_full_name() # TODO: Administrators!
.d.svelte-xyz ~ .e:where(.svelte-xyz) { color: green; }
.a.svelte-xyz ~ .g:where(.svelte-xyz) { color: green; }
.a.svelte-xyz ~ .b:where(.svelte-xyz) { color: green; }
.f.svelte-xyz ~ .g:where(.svelte-xyz) { color: green; }
.b.svelte-xyz ~ .g:where(.svelte-xyz) { color: green; }
/* no match */
/* (unused) .b ~ .c { color: red; }*/
/* (unused) .c ~ .f { color: red; }*/
/* (unused) .b ~ .f { color: red; }*/ | css | github | https://github.com/sveltejs/svelte | packages/svelte/tests/css/samples/general-siblings-combinator-slot/expected.css |
""" Tests for tab functions (just primitive). """
import json
from contentstore.views import tabs
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.x_module import STUDENT_VIEW
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.tabs import CourseTabList
from xmodule.modulestore.django import modulestore
class TabsPageTests(CourseTestCase):
    """Test cases for Tabs (a.k.a Pages) page"""
    def setUp(self):
        """Common setup for tests"""
        # call super class to setup course, etc.
        super(TabsPageTests, self).setUp()
        # Set the URL for tests
        self.url = reverse_course_url('tabs_handler', self.course.id)
        # add a static tab to the course, for code coverage
        self.test_tab = ItemFactory.create(
            parent_location=self.course.location,
            category="static_tab",
            display_name="Static_1"
        )
        self.reload_course()
    def check_invalid_tab_id_response(self, resp):
        """Verify response is an error listing the invalid_tab_id"""
        self.assertEqual(resp.status_code, 400)
        resp_content = json.loads(resp.content)
        self.assertIn("error", resp_content)
        self.assertIn("invalid_tab_id", resp_content['error'])
    def test_not_implemented(self):
        """Verify not implemented errors"""
        # JSON GET request not supported
        with self.assertRaises(NotImplementedError):
            self.client.get(self.url)
        # JSON POST request not supported
        with self.assertRaises(NotImplementedError):
            self.client.ajax_post(
                self.url,
                data=json.dumps({
                    'tab_id_locator': {'tab_id': 'courseware'},
                    'unsupported_request': None,
                }),
            )
        # invalid JSON POST request
        with self.assertRaises(NotImplementedError):
            self.client.ajax_post(
                self.url,
                data={'invalid_request': None},
            )
    def test_view_index(self):
        """Basic check that the Pages page responds correctly"""
        resp = self.client.get_html(self.url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('course-nav-list', resp.content)
    def test_reorder_tabs(self):
        """Test re-ordering of tabs"""
        # get the original tab ids
        orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
        tab_ids = list(orig_tab_ids)
        num_orig_tabs = len(orig_tab_ids)
        # make sure we have enough tabs to play around with
        self.assertGreaterEqual(num_orig_tabs, 5)
        # reorder the last two tabs
        tab_ids[num_orig_tabs - 1], tab_ids[num_orig_tabs - 2] = tab_ids[num_orig_tabs - 2], tab_ids[num_orig_tabs - 1]
        # remove the middle tab
        # (the code needs to handle the case where tabs requested for re-ordering is a subset of the tabs in the course)
        # use floor division so the index stays an int under both Python 2
        # and Python 3 ("/" on ints yields a float on Python 3)
        removed_tab = tab_ids.pop(num_orig_tabs // 2)
        self.assertEqual(len(tab_ids), num_orig_tabs - 1)
        # post the request
        resp = self.client.ajax_post(
            self.url,
            data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
        )
        self.assertEqual(resp.status_code, 204)
        # reload the course and verify the new tab order
        self.reload_course()
        new_tab_ids = [tab.tab_id for tab in self.course.tabs]
        self.assertEqual(new_tab_ids, tab_ids + [removed_tab])
        self.assertNotEqual(new_tab_ids, orig_tab_ids)
    def test_reorder_tabs_invalid_list(self):
        """Test re-ordering of tabs with invalid tab list"""
        orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
        tab_ids = list(orig_tab_ids)
        # reorder the first two tabs
        tab_ids[0], tab_ids[1] = tab_ids[1], tab_ids[0]
        # post the request
        resp = self.client.ajax_post(
            self.url,
            data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
        )
        self.assertEqual(resp.status_code, 400)
        resp_content = json.loads(resp.content)
        self.assertIn("error", resp_content)
    def test_reorder_tabs_invalid_tab(self):
        """Test re-ordering of tabs with invalid tab"""
        invalid_tab_ids = ['courseware', 'info', 'invalid_tab_id']
        # post the request
        resp = self.client.ajax_post(
            self.url,
            data={'tabs': [{'tab_id': tab_id} for tab_id in invalid_tab_ids]},
        )
        self.check_invalid_tab_id_response(resp)
    def check_toggle_tab_visiblity(self, tab_type, new_is_hidden_setting):
        """Helper method to check changes in tab visibility"""
        # find the tab
        old_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
        # visibility should be different from new setting
        self.assertNotEqual(old_tab.is_hidden, new_is_hidden_setting)
        # post the request
        resp = self.client.ajax_post(
            self.url,
            data=json.dumps({
                'tab_id_locator': {'tab_id': old_tab.tab_id},
                'is_hidden': new_is_hidden_setting,
            }),
        )
        self.assertEqual(resp.status_code, 204)
        # reload the course and verify the new visibility setting
        self.reload_course()
        new_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
        self.assertEqual(new_tab.is_hidden, new_is_hidden_setting)
    def test_toggle_tab_visibility(self):
        """Test toggling of tab visibility"""
        self.check_toggle_tab_visiblity('wiki', True)
        self.check_toggle_tab_visiblity('wiki', False)
    def test_toggle_invalid_tab_visibility(self):
        """Test toggling visibility of an invalid tab"""
        # post the request
        resp = self.client.ajax_post(
            self.url,
            data=json.dumps({
                'tab_id_locator': {'tab_id': 'invalid_tab_id'}
            }),
        )
        self.check_invalid_tab_id_response(resp)
    def test_tab_preview_html(self):
        """
        Verify that the static tab renders itself with the correct HTML
        """
        preview_url = '/xblock/{}/{}'.format(self.test_tab.location, STUDENT_VIEW)
        resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')
        self.assertEqual(resp.status_code, 200)
        resp_content = json.loads(resp.content)
        html = resp_content['html']
        # Verify that the HTML contains the expected elements
        self.assertIn('<span class="action-button-text">Edit</span>', html)
        self.assertIn('<span class="sr">Duplicate this component</span>', html)
        self.assertIn('<span class="sr">Delete this component</span>', html)
        self.assertIn('<span data-tooltip="Drag to reorder" class="drag-handle action"></span>', html)
class PrimitiveTabEdit(ModuleStoreTestCase):
    """Tests for the primitive tab edit data manipulations"""
    def test_delete(self):
        """Test primitive tab deletion."""
        course = CourseFactory.create()
        # deleting either of the first two tabs is rejected
        with self.assertRaises(ValueError):
            tabs.primitive_delete(course, 0)
        with self.assertRaises(ValueError):
            tabs.primitive_delete(course, 1)
        # out-of-range index
        with self.assertRaises(IndexError):
            tabs.primitive_delete(course, 6)
        tabs.primitive_delete(course, 2)
        self.assertFalse({u'type': u'textbooks'} in course.tabs)
        # Check that discussion has shifted up
        self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})
    def test_insert(self):
        """Test primitive tab insertion."""
        course = CourseFactory.create()
        tabs.primitive_insert(course, 2, 'notes', 'aname')
        self.assertEquals(course.tabs[2], {'type': 'notes', 'name': 'aname'})
        # inserting at position 0 is rejected, as is inserting a static_tab
        with self.assertRaises(ValueError):
            tabs.primitive_insert(course, 0, 'notes', 'aname')
        with self.assertRaises(ValueError):
            tabs.primitive_insert(course, 3, 'static_tab', 'aname')
    def test_save(self):
        """Test course saving."""
        course = CourseFactory.create()
        tabs.primitive_insert(course, 3, 'notes', 'aname')
        # re-fetch from the modulestore to prove the change was persisted
        course2 = modulestore().get_course(course.id)
        self.assertEquals(course2.tabs[3], {'type': 'notes', 'name': 'aname'})
# -*- coding: utf-8 -*-
from django.db import models
import jsonschema
from website.util import api_v2_url
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.exceptions import ValidationValueError
from website.project.metadata.utils import create_jsonschema_from_metaschema
class MetaSchema(ObjectIDMixin, BaseModel):
    """A named, versioned registration schema with config-driven behavior."""
    name = models.CharField(max_length=255)
    schema = DateTimeAwareJSONField(default=dict)
    category = models.CharField(max_length=255, null=True, blank=True)
    active = models.BooleanField(default=True)
    # Version of the schema to use (e.g. if questions, responses change)
    schema_version = models.IntegerField()
    class Meta:
        unique_together = ('name', 'schema_version')
    def __unicode__(self):
        return '(name={}, schema_version={}, id={})'.format(self.name, self.schema_version, self.id)
    @property
    def _config(self):
        # the optional "config" sub-document of the stored schema
        return self.schema.get('config', {})
    @property
    def requires_approval(self):
        return self._config.get('requiresApproval', False)
    @property
    def fulfills(self):
        return self._config.get('fulfills', [])
    @property
    def messages(self):
        return self._config.get('messages', {})
    @property
    def requires_consent(self):
        return self._config.get('requiresConsent', False)
    @property
    def has_files(self):
        return self._config.get('hasFiles', False)
    @property
    def absolute_api_v2_url(self):
        path = '/metaschemas/{}/'.format(self._id)
        return api_v2_url(path)
    @classmethod
    def get_prereg_schema(cls):
        return cls.objects.get(
            name='Prereg Challenge',
            schema_version=2
        )
    def validate_metadata(self, metadata, reviewer=False, required_fields=False):
        """
        Validates registration_metadata field.

        Raises ValidationValueError when *metadata* does not conform to this
        schema (or the generated jsonschema itself is invalid).
        """
        schema = create_jsonschema_from_metaschema(self.schema,
                                                   required_fields=required_fields,
                                                   is_reviewer=reviewer)
        try:
            jsonschema.validate(metadata, schema)
        except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
            # the two exception types previously had duplicated, identical
            # handlers; normalize either failure into the domain error type
            raise ValidationValueError(e.message)
        return
# frozen_string_literal: true
require "cases/helper"
# Exercises enable_extension/disable_extension migrations against PostgreSQL
# (using the "hstore" extension), including schema-qualified names, cascade
# behavior, and dependency errors.
class PostgresqlExtensionMigrationTest < ActiveRecord::PostgreSQLTestCase
  self.use_transactional_tests = false
  # Minimal migration fixtures used by the tests below.
  class EnableHstore < ActiveRecord::Migration::Current
    def change
      enable_extension "hstore"
    end
  end
  class DisableHstore < ActiveRecord::Migration::Current
    def change
      disable_extension "hstore"
    end
  end
  class EnableHstoreInSchema < ActiveRecord::Migration::Current
    def change
      enable_extension "other_schema.hstore"
    end
  end
  def setup
    super
    @connection = ActiveRecord::Base.lease_connection
    @pool = ActiveRecord::Base.connection_pool
    # Install a table name prefix/suffix so the *_ignores_prefix_and_suffix
    # tests can prove extension migrations are unaffected by them.
    @old_table_name_prefix = ActiveRecord::Base.table_name_prefix
    @old_table_name_suffix = ActiveRecord::Base.table_name_suffix
    ActiveRecord::Base.table_name_prefix = "p_"
    ActiveRecord::Base.table_name_suffix = "_s"
    @pool.schema_migration.delete_all_versions rescue nil
    ActiveRecord::Migration.verbose = false
  end
  def teardown
    # Undo everything setup changed, best-effort.
    @pool.schema_migration.delete_all_versions rescue nil
    ActiveRecord::Migration.verbose = true
    ActiveRecord::Base.table_name_prefix = @old_table_name_prefix
    ActiveRecord::Base.table_name_suffix = @old_table_name_suffix
    super
  end
  def test_enable_extension_migration_ignores_prefix_and_suffix
    @connection.disable_extension("hstore")
    migrations = [EnableHstore.new(nil, 1)]
    ActiveRecord::Migrator.new(:up, migrations, @pool.schema_migration, @pool.internal_metadata).migrate
    assert @connection.extension_enabled?("hstore"), "extension hstore should be enabled"
  end
  def test_enable_extension_migration_with_schema
    @connection.disable_extension("hstore")
    @connection.create_schema "other_schema"
    migrations = [EnableHstoreInSchema.new(nil, 1)]
    ActiveRecord::Migrator.new(:up, migrations, @pool.schema_migration, @pool.internal_metadata).migrate
    assert @connection.extension_enabled?("hstore"), "extension hstore should be enabled"
  ensure
    @connection.drop_schema "other_schema", if_exists: true
  end
  def test_disable_extension_migration_ignores_prefix_and_suffix
    @connection.enable_extension("hstore")
    migrations = [DisableHstore.new(nil, 1)]
    ActiveRecord::Migrator.new(:up, migrations, @pool.schema_migration, @pool.internal_metadata).migrate
    assert_not @connection.extension_enabled?("hstore"), "extension hstore should not be enabled"
  end
  def test_disable_extension_raises_when_dependent_objects_exist
    @connection.enable_extension("hstore")
    # A table with an hstore column depends on the extension.
    @connection.create_table(:hstores) do |t|
      t.hstore :settings
    end
    error = assert_raises(StandardError) do
      @connection.disable_extension(:hstore)
    end
    assert_match(/cannot drop extension hstore because other objects depend on it/i, error.message)
  ensure
    @connection.drop_table(:hstores, if_exists: true)
  end
  def test_disable_extension_drops_extension_when_cascading
    @connection.enable_extension("hstore")
    @connection.create_table(:hstores) do |t|
      t.hstore :settings
    end
    # force: :cascade drops dependent objects along with the extension.
    @connection.disable_extension(:hstore, force: :cascade)
    assert_not @connection.extension_enabled?("hstore"), "extension hstore should not be enabled"
  ensure
    @connection.drop_table(:hstores, if_exists: true)
  end
end
# Package entry point: running the package with ``python -m`` dispatches to
# its ``main`` function.
from . import main

if __name__ == "__main__":
    main()
# Design patterns for AI SDKs and signal APIs
Interacting with AI and Large Language Model (LLM) APIs introduces unique challenges, such as managing asynchronous operations, handling streaming data, and designing a responsive user experience for potentially slow or unreliable network requests. Angular [signals](guide/signals) and the [`resource`](guide/signals/resource) API provide powerful tools to solve these problems elegantly.
## Triggering requests with signals
A common pattern when working with user-provided prompts is to separate the user's live input from the submitted value that triggers the API call.
1. Store the user's raw input in one signal as they type
2. When the user submits (e.g., by clicking a button), update a second signal with the contents of the first signal.
3. Use the second signal in the **`params`** field of your `resource`.
This setup ensures the resource's **`loader`** function only runs when the user explicitly submits their prompt, not on every keystroke. You can use additional signal parameters, like a `sessionId` or `userId` (which can be useful for creating persistent LLM sessions), in the `loader` field. This way, the request always uses these parameters' current values without re-triggering the asynchronous function defined in the `loader` field.
Many AI SDKs provide helper methods for making API calls. For example, the Genkit client library exposes a `runFlow` method for calling Genkit flows, which you can call from a resource's `loader`. For other APIs, you can use the [`httpResource`](guide/signals/resource#reactive-data-fetching-with-httpresource).
The following example shows a `resource` that fetches parts of an AI-generated story. The `loader` is triggered only when the `storyInput` signal changes.
```ts
// A resource that fetches three parts of an AI generated story
storyResource = resource({
// The default value to use before the first request or on error
defaultValue: DEFAULT_STORY,
// The loader is re-triggered when this signal changes
params: () => this.storyInput(),
// The async function to fetch data
loader: ({params}): Promise<StoryData> => {
// The params value is the current value of the storyInput signal
const url = this.endpoint();
return runFlow({
url,
input: {
userInput: params,
sessionId: this.storyService.sessionId(), // Read from another signal
},
});
},
});
```
## Preparing LLM data for templates
You can configure LLM APIs to return structured data. Strongly typing your `resource` to match the expected output from the LLM provides better type safety and editor autocompletion.
To manage state derived from a resource, use a `computed` signal or `linkedSignal`. Because `linkedSignal` [provides access to prior values](guide/signals/linked-signal), it can serve a variety of AI-related use cases, including
- building a chat history
- preserving or customizing data that templates display while LLMs generate content
In the example below, `storyParts` is a `linkedSignal` that appends the latest story parts returned from `storyResource` to the existing array of story parts.
```ts
storyParts = linkedSignal<string[], string[]>({
// The source signal that triggers the computation
source: () => this.storyResource.value().storyParts,
// The computation function
computation: (newStoryParts, previous) => {
// Get the previous value of this linkedSignal, or an empty array
const existingStoryParts = previous?.value || [];
// Return a new array with the old and new parts
return [...existingStoryParts, ...newStoryParts];
},
});
```
## Performance and user experience
LLM APIs may be slower and more error-prone than conventional, more deterministic APIs. You can use several Angular features to build a performant and user-friendly interface.
- **Scoped Loading:** place the `resource` in the component that directly uses the data. This helps limit change detection cycles (especially in zoneless applications) and prevents blocking other parts of your application. If data needs to be shared across multiple components, provide the `resource` from a service.
- **SSR and Hydration:** use Server-Side Rendering (SSR) with incremental hydration to render the initial page content quickly. You can show a placeholder for the AI-generated content and defer fetching the data until the component hydrates on the client.
- **Loading State:** use the `resource` `LOADING` [status](guide/signals/resource#resource-status) to show an indicator, like a spinner, while the request is in flight. This status covers both initial loads and reloads.
- **Error Handling and Retries:** use the `resource` [**`reload()`**](guide/signals/resource#reloading) method as a simple way for users to retry failed requests, which may be more prevalent when relying on AI-generated content.
The following example demonstrates how to create a responsive UI to dynamically display an AI generated image with loading and retry functionality.
```angular-html
<!-- Display a loading spinner while the LLM generates the image -->
@if (imgResource.isLoading()) {
<div class="img-placeholder">
<mat-spinner [diameter]="50" />
</div>
<!-- Dynamically populates the src attribute with the generated image URL -->
} @else if (imgResource.hasValue()) {
<img [src]="imgResource.value()" />
<!-- Provides a retry option if the request fails -->
} @else {
<div class="img-placeholder" (click)="imgResource.reload()">
<mat-icon fontIcon="refresh" />
<p>Failed to load image. Click to retry.</p>
</div>
}
```
## AI patterns in action: streaming chat responses
Interfaces often display partial results from LLM-based APIs incrementally as response data arrives. Angular's resource API provides the ability to stream responses to support this type of pattern. The `stream` property of `resource` accepts an asynchronous function you can use to apply updates to a signal value over time. The signal being updated represents the data being streamed.
```ts
characters = resource({
stream: async () => {
const data = signal<ResourceStreamItem<string>>({value: ''});
// Calls a Genkit streaming flow using the streamFlow method
// exposed by the Genkit client SDK
const response = streamFlow({
url: '/streamCharacters',
input: 10,
});
(async () => {
for await (const chunk of response.stream) {
data.update((prev) => {
if ('value' in prev) {
return {value: `${prev.value} ${chunk}`};
} else {
return {error: chunk as unknown as Error};
}
});
}
})();
return data;
},
});
```
The `characters` member is updated asynchronously and can be displayed in the template.
```angular-html
@if (characters.isLoading()) {
<p>Loading...</p>
} @else if (characters.hasValue()) {
<p>{{ characters.value() }}</p>
} @else {
<p>{{ characters.error() }}</p>
}
```
On the server side, in `server.ts` for example, the defined endpoint sends the data to be streamed to the client. The following code uses Gemini with the Genkit framework but this technique is applicable to other APIs that support streaming responses from LLMs:
```ts
import {startFlowServer} from '@genkit-ai/express';
import {genkit} from 'genkit/beta';
import {googleAI, gemini20Flash} from '@genkit-ai/googleai';
const ai = genkit({plugins: [googleAI()]});
export const streamCharacters = ai.defineFlow(
{
name: 'streamCharacters',
inputSchema: z.number(),
outputSchema: z.string(),
streamSchema: z.string(),
},
async (count, {sendChunk}) => {
const {response, stream} = ai.generateStream({
model: gemini20Flash,
config: {
temperature: 1,
},
prompt: `Generate ${count} different RPG game characters.`,
});
(async () => {
for await (const chunk of stream) {
sendChunk(chunk.content[0].text!);
}
})();
return (await response).text;
},
);
startFlowServer({
flows: [streamCharacters],
});
``` | unknown | github | https://github.com/angular/angular | adev/src/content/ai/design-patterns.md |
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec translating between bytes and text via the charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* to bytes using the module-level ``encoding_table``."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode *input* to text using the module-level ``decoding_table``."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no state between chunks."""

    def encode(self, input, final=False):
        # Each chunk encodes independently, so *final* needs no special handling.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding keeps no state between chunks."""

    def decode(self, input, final=False):
        # Each chunk decodes independently, so *final* needs no special handling.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer built on the stateless :class:`Codec` above."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader built on the stateless :class:`Codec` above."""
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for this encoding.

    Called by the ``encodings`` package machinery when the codec is
    looked up by name.
    """
    return codecs.CodecInfo(
        name='mac-cyrillic',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
# Build the inverse (character -> byte) mapping from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from PIL import ImageOps
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Flip(BaseFilter):
    """
    ImageSensor filter that flips each incoming image top-to-bottom
    via PIL's ``ImageOps.flip``.
    """

    def __init__(self, difficulty=0.5, seed=None, reproducible=False):
        """
        @param difficulty -- Accepted for interface compatibility; this
            filter does not use it.
        @param seed -- Seed value for random number generator, to produce
            reproducible results.
        @param reproducible -- Whether to seed the random number generator based
            on a hash of the image pixels upon each call to process().
            'seed' and 'reproducible' cannot be used together.
        """
        BaseFilter.__init__(self, seed, reproducible)

    def process(self, image):
        """
        @param image -- The image to process.

        Returns the vertically flipped image.
        """
        # Run the base class bookkeeping first, then flip in one step.
        BaseFilter.process(self, image)
        return ImageOps.flip(image)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.