| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
from __future__ import absolute_import
import os
import tempfile
import unittest
import mdtraj
import numpy as np
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.element import hydrogen, oxygen
from mdtraj.core.topology import Topology
from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering
from pyemma.coordinates.pipelines import Discretizer
from pyemma.coordinates.data.data_in_memory import DataInMemory
from pyemma.coordinates.api import cluster_kmeans, pca, source
def create_water_topology_on_disc(n):
topfile = tempfile.mktemp('.pdb')
top = Topology()
chain = top.add_chain()
for i in range(n):
res = top.add_residue('r%i' % i, chain)
h1 = top.add_atom('H', hydrogen, res)
o = top.add_atom('O', oxygen, res)
h2 = top.add_atom('H', hydrogen, res)
top.add_bond(h1, o)
top.add_bond(h2, o)
xyz = np.zeros((n * 3, 3))
Trajectory(xyz, top).save_pdb(topfile)
return topfile
def create_traj_on_disc(topfile, n_frames, n_atoms):
fn = tempfile.mktemp('.xtc')
xyz = np.random.random((n_frames, n_atoms, 3))
t = mdtraj.load(topfile)
t.xyz = xyz
t.time = np.arange(n_frames)
t.save(fn)
return fn
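# A minimal sketch (not part of the original test module) of the pipeline
# pattern the tests below exercise: reader -> transform -> clustering.
def _example_pipeline_sketch(trajfiles, topfile):
    reader = source(trajfiles, top=topfile)
    disc = Discretizer(reader, transform=pca(dim=2),
                       cluster=UniformTimeClustering(n_clusters=2))
    disc.parametrize()
    return disc.dtrajs  # one discrete trajectory per input file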
class TestDiscretizer(unittest.TestCase):
@classmethod
def setUpClass(cls):
c = super(TestDiscretizer, cls).setUpClass()
# create a topology of water-like residues (3 atoms each) plus fake
# trajectories with random coordinates over all frames.
cls.n_frames = 1000
cls.n_residues = 30
cls.topfile = create_water_topology_on_disc(cls.n_residues)
# create some trajectories
t1 = create_traj_on_disc(
cls.topfile, cls.n_frames, cls.n_residues * 3)
t2 = create_traj_on_disc(
cls.topfile, cls.n_frames, cls.n_residues * 3)
cls.trajfiles = [t1, t2]
cls.dest_dir = tempfile.mkdtemp()
return c
@classmethod
def tearDownClass(cls):
"""delete temporary files"""
os.unlink(cls.topfile)
for f in cls.trajfiles:
os.unlink(f)
import shutil
shutil.rmtree(cls.dest_dir, ignore_errors=True)
def test(self):
reader = source(self.trajfiles, top=self.topfile)
pcat = pca(dim=2)
n_clusters = 2
clustering = UniformTimeClustering(n_clusters=n_clusters)
D = Discretizer(reader, transform=pcat, cluster=clustering)
D.parametrize()
self.assertEqual(len(D.dtrajs), len(self.trajfiles))
for dtraj in clustering.dtrajs:
unique = np.unique(dtraj)
self.assertEqual(unique.shape[0], n_clusters)
def test_with_data_in_mem(self):
import pyemma.coordinates as api
data = [np.random.random((100, 50)),
np.random.random((103, 50)),
np.random.random((33, 50))]
reader = source(data)
assert isinstance(reader, DataInMemory)
tpca = api.pca(dim=2)
n_centers = 10
km = api.cluster_kmeans(k=n_centers)
disc = api.discretizer(reader, tpca, km)
disc.parametrize()
dtrajs = disc.dtrajs
for dtraj in dtrajs:
n_states = np.max(np.unique(dtraj))
self.assertGreaterEqual(n_centers - 1, n_states,
"dtraj has more states than cluster centers")
def test_save_dtrajs(self):
reader = source(self.trajfiles, top=self.topfile)
cluster = cluster_kmeans(k=2)
d = Discretizer(reader, cluster=cluster)
d.parametrize()
d.save_dtrajs(output_dir=self.dest_dir)
dtrajs = os.listdir(self.dest_dir)
# one discrete trajectory file is expected per input trajectory
self.assertEqual(len(dtrajs), len(self.trajfiles))
if __name__ == "__main__":
unittest.main()
| marscher/PyEMMA | pyemma/coordinates/tests/test_discretizer.py | Python | lgpl-3.0 | 4,563 | ["MDTraj"] | c1f0fade73b5a082e58f2ba4609b54da88a14caf305678a1bded455d5a562258 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import io
import os
import versioneer
VERSION = versioneer.get_version()
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(os.path.join(os.path.dirname(__file__), filename), encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst')
setup(
name='ommprotocol',
version=VERSION,
cmdclass=versioneer.get_cmdclass(),
url='https://github.com/insilichem/ommprotocol',
download_url='https://github.com/insilichem/ommprotocol/tarball/v' + VERSION,
license='LGPL',
author="Jaime Rodríguez-Guerra",
author_email='jaime.rogue@gmail.com',
description='Easy to deploy MD protocols for OpenMM',
long_description=long_description,
packages=find_packages(),
package_data={'': ['../examples/*.yaml']},
platforms='any',
classifiers=[
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Chemistry',
],
entry_points='''
[console_scripts]
ommprotocol=ommprotocol:run_protocol
ommanalyze=ommprotocol.analyze:main
state2pdb=ommprotocol:state_to_pdb
exportframe=ommprotocol:export_frame
''',
)
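# The entry_points block above wires console commands to Python callables;
# e.g. after installation, `ommprotocol` dispatches to
# ommprotocol.run_protocol (typically invoked as `ommprotocol input.yaml`;
# the YAML-file usage is an assumption, not stated in this file).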
| insilichem/ommprotocol | setup.py | Python | lgpl-3.0 | 1,700 | ["OpenMM"] | 864cab5122de4f7da3a5f0a996a15e448a6ccebec6a2451bd1f4fd1cbf15f8d5 |
"""
Basic SparseCFProjection with associated sparse CFs and output,
response, and learning function. If the sparse component cannot be imported,
SparseCFProjection will fall back to a basic dense CFProjection.
The CFSPOF and CFSPLF plugin functions allow any single-CF output function to
be applied to the sparse CFs, but may suffer a serious performance
loss. For real work, such functions should be implemented at the
Cython or C++ level.
"""
import numpy as np
import math
from scipy.ndimage.filters import gaussian_filter
import param
from copy import copy
import topo
from topo.base.cf import CFProjection, NullCFError, _create_mask, simple_vectorize
from topo import pattern
from imagen import patterngenerator
from imagen.patterngenerator import PatternGenerator
from topo.base.functionfamily import TransferFn, IdentityTF
from topo.base.functionfamily import LearningFn, Hebbian
from topo.base.functionfamily import ResponseFn, DotProduct
from topo.base.sheetcoords import Slice
use_sparse = True
try:
import sparse
except ImportError:
use_sparse = False
sparse_type = np.float32
class CFSPLF_Plugin(param.Parameterized):
"""CFSPLearningFunction applying the specified single_cf_fn to each Sparse CF."""
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),doc="""
Accepts a LearningFn that will be applied to each CF individually.""")
def constant_sum_connection_rate(self,n_units,learning_rate):
"""
Return the learning rate for a single connection assuming that
the total rate is to be divided evenly among all the units in
the connection field.
"""
return float(learning_rate)/n_units
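# Example: learning_rate=0.1 divided over n_units=400 connections gives a
# per-connection rate of 0.00025.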
def __call__(self, projection, **params):
"""Apply the specified single_cf_fn to every sparse CF."""
single_connection_learning_rate = self.constant_sum_connection_rate(projection.n_units,projection.learning_rate)
# avoid evaluating these references each time in the loop
single_cf_fn = self.single_cf_fn
for cf in projection.flatcfs:
temp_weights = cf.weights
single_cf_fn(cf.get_input_matrix(projection.src.activity),
projection.dest.activity.flat[cf.oned_idx], temp_weights,
single_connection_learning_rate)
temp_weights *= cf.mask
cf.weights = temp_weights
class CFSPOF_Plugin(param.Parameterized):
"""
Applies the specified single_cf_fn to each SparseCF in the SparseCFProjection.
"""
single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
doc="Accepts a TransferFn that will be applied to each CF individually.")
def __call__(self, projection, **params):
if type(self.single_cf_fn) is not IdentityTF:
single_cf_fn = self.single_cf_fn
for cf in projection.flatcfs:
temp_weights = cf.weights
single_cf_fn(cf.weights)
cf.weights = temp_weights
del cf.norm_total
class CFSPOF_Prune(CFSPOF_Plugin):
"""
Prunes specified percentage of connections from CFs in SparseCFProjection
at specified interval.
"""
interval = param.Number(default=1000,bounds=(0,None),doc="""
Time interval at which pruning step will be applied.""")
percentile = param.Number(default=10.0,bounds=(0,100),doc="""
Percentile boundary below which connections will be pruned.""")
def __call__(self, projection, **params):
time = math.ceil(topo.sim.time())
if (time == 0):
if not hasattr(self,"initial_conns"):
self.initial_conns = {}
self.initial_conns[projection.name] = projection.n_conns()
elif (time % self.interval) == 0:
for cf in projection.flatcfs:
dim1,dim2 = cf.weights.shape
temp_weights = cf.weights
percentile = np.percentile(temp_weights[temp_weights.nonzero()],self.percentile)
temp_weights[np.where(temp_weights<=percentile)] = 0.0
cf.weights = temp_weights
projection.weights.prune()
self.message("%s has %f%% of initial connections" % (projection.name, (float(projection.n_conns())/self.initial_conns[projection.name])*100))
class CFSPOF_SproutRetract(CFSPOF_Plugin):
"""
Sprouting and retraction weights output function. At a preset time
interval, the function removes and adds connections based on a
piecewise function, which determines the number of connections to
alter and the sprouting and retraction ratios, eventually allowing
connections to converge on the target_sparsity. The function
ensures the full turnover_rate is applied at the maximal distances
from the target sparsity, i.e. at 0% and 100% density. As the
projection approaches the target sparsity, it will asymptote, but a
residual turnover will ensure that a fixed amount of connections
will continue to sprout and retract.
Retraction deletes the x lowest weights, while sprouting applies a
convolution with a Gaussian kernel to the existing connections,
growing connections at locations with the highest probabilities.
Still experimental and not scientifically validated.
"""
interval = param.Number(default=1000,bounds=(0,None),doc="""
Time interval between sprout/retract steps.""")
residual_turnover = param.Number(default=0.01,bounds=(0,1.0),doc="""
Constant turnover rate independent of current sparsity.""")
turnover_rate = param.Number(default=0.1,bounds=(0,1.0),doc="""
Percentage of weights to change per interval, assuming
currently fully dense and target is fully sparse.""")
target_sparsity = param.Number(default=0.15,bounds=(0,1.0),doc="""
Sparsity level at which sprouting and retraction cancel out.""")
kernel_sigma = param.Number(default=1.0,bounds=(0.0,10.0),doc="""
Gaussian spatial variance for weights to diffuse per interval.""")
disk_mask = param.Boolean(default=True,doc="""
Limits connection sprouting to a disk.""")
def __call__(self, projection, **params):
time = math.ceil(topo.sim.time())
if self.disk_mask:
self.disk = pattern.Disk(size=1.0,smoothing=0.0)
# Get CF and src sheet shapes
cf_x,cf_y = projection.dest.activity.shape
src_x,src_y = projection.src.activity.shape
# Initialize sparse triplet arrays
y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
val_array = np.zeros((src_x*src_y*cf_y),dtype=sparse_type)
# Create new sparse matrix to accumulate into
sum_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
# Counters for logging
sprout_sum = 0; prune_sum = 0; unit_total = 0
self.mask_total = 0
if (time == 0):
if not hasattr(self,"initial_conns"):
self.initial_conns = {}
self.initial_conns[projection.name] = projection.n_conns()
elif (time % self.interval) == 0:
idx=0
for cidx,cf in enumerate(projection.flatcfs):
temp_weights = cf.weights
dense_unit_mask = (1.0 - (temp_weights>0.0))
dim1,dim2 = temp_weights.shape
sprout_count,prune_idx,nnz = self.calc_ratios(temp_weights)
self.prune(temp_weights,prune_idx)
nnz_pp = np.count_nonzero(temp_weights)
prune_sum += (nnz_pp-nnz)
self.sprout(temp_weights,dense_unit_mask,sprout_count)
nnz_ps = np.count_nonzero(temp_weights)
sprout_sum += nnz_ps - nnz_pp
unit_total += nnz_ps
# Populate sparse array chunk
temp_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
x1,x2,y1,y2 = cf.input_sheet_slice.tolist()
for cnx in range(dim1):
val_array[idx:idx+dim2] = temp_weights[cnx,:]
x_val = (x1+cnx) * src_y + y1
x_array[idx:idx+dim2] = range(x_val,x_val+dim2)
y_array[idx:idx+dim2] = cidx
idx += dim2
# Populate combined sparse array with sparse array chunk
if (cidx+1)%cf_y == 0:
nnz_idx = val_array.nonzero()
temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
sum_sparse += temp_sparse
x_array *= 0; y_array *= 0; val_array *= 0.0
idx=0
projection.weights = sum_sparse
del temp_sparse, sum_sparse
projection.weights.compress()
self.message("%s pruned by %d and sprouted %d, connection is now %f%% dense" % (projection.name,prune_sum,sprout_sum,(float(unit_total)/self.mask_total)*100))
def sprout(self, temp_weights, mask, sprout_count):
"""
Applies a Gaussian blur to the existing connection field,
selecting the n units with the highest probabilities to sprout
new connections, where n is set by the sprout_count. New
connections are initialized at the minimal strength of the
current CF.
"""
dim1,dim2 = temp_weights.shape
init_weight = temp_weights[temp_weights.nonzero()].min()
blurred_weights = gaussian_filter(temp_weights, sigma=self.kernel_sigma)
blurred_weights = (blurred_weights - blurred_weights.min()) / blurred_weights.max()
sprout_prob_map = (blurred_weights * np.random.rand(dim1,dim2)) * mask
if self.disk_mask:
sprout_prob_map *= self.disk(xdensity=dim2,ydensity=dim1)
sprout_inds = np.unravel_index(np.argsort(sprout_prob_map.flatten())[-sprout_count:],(dim1,dim2))
temp_weights[sprout_inds] = init_weight
def prune(self, temp_weights, prune_idx):
"""
Retracts n connections with the lowest weights, where n is
determined by the piecewise linear function in the calc_ratios
method.
"""
sorted_weights = np.sort(temp_weights.flatten())
threshold = sorted_weights[prune_idx]
temp_weights[temp_weights < threshold] = 0.0
def calc_ratios(self,temp_weights):
"""
Uses a piecewise linear function to determine the unit
proportion of sprouting and retraction and the associated
turnover rates.
Above the target sparsity the sprout/retract ratio scales
linearly up to maximal density, i.e. at full density 100% of
the turnover is put into retraction while at full sparsity
all the turnover is put into sprouting new connections. At
the target density sprouting and retraction are equal.
The turnover is also determined by the piecewise
linear function. At maximal distance from the target sparsity,
i.e. at full sparsity or density, the full turnover rate will
be used and as the target sparsity is approached from either
side this term decays to zero. Therefore, a residual turnover
is introduced to ensure that even at the target sparsity some
connections continue to sprout and retract.
"""
dim1,dim2 = temp_weights.shape
if self.disk_mask:
masked_units = len(self.disk(xdensity=dim2,ydensity=dim1).nonzero()[0])
else:
masked_units = dim1*dim2
self.mask_total += masked_units
max_units = dim1*dim2
nnz = np.count_nonzero(temp_weights)
cf_sparsity = nnz / float(masked_units)
delta_sparsity = cf_sparsity - self.target_sparsity
if delta_sparsity > 0:
relative_sparsity = delta_sparsity/(1.0 - self.target_sparsity)
else:
relative_sparsity = delta_sparsity/self.target_sparsity
# Total number of units to modify, broken down into units for pruning and sprouting
delta_units = (abs(self.turnover_rate * relative_sparsity) + self.residual_turnover) * masked_units
prune_factor = 0.5 + (0.5*relative_sparsity)
prune_count = int(delta_units * prune_factor)
prune_idx = (max_units-nnz)+prune_count
sprout_count = int(delta_units * (1-prune_factor))
return sprout_count, prune_idx, nnz
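# Worked example for calc_ratios (hypothetical numbers): with
# target_sparsity=0.15, turnover_rate=0.1, residual_turnover=0.01 and a CF
# currently 50% dense, delta_sparsity = 0.35 and
# relative_sparsity = 0.35/0.85 ~= 0.41, so
# delta_units ~= (0.1*0.41 + 0.01) * masked_units ~= 0.051*masked_units and
# prune_factor ~= 0.71: about 71% of the turnover is pruned and 29%
# sprouted, pushing the CF density toward the target.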
class CFSPRF_Plugin(param.Parameterized):
"""
Generic large-scale response function based on a simple single-CF function.
Applies the single_cf_fn to each CF in turn. For the default single_cf_fn
of DotProduct(), does a basic dot product of each CF with the corresponding
slice of the input array. This function is likely to be slow to run, but
it is easy to extend with any arbitrary single-CF response function.
The single_cf_fn must be a function f(X,W) that takes two identically
shaped matrices X (the input) and W (the CF weights) and computes a scalar
activation value based on those weights.
"""
single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),doc="""
Accepts a ResponseFn that will be applied to each CF individually.""")
def __call__(self, projection, **params):
single_cf_fn = self.single_cf_fn
for i,cf in enumerate(projection.flatcfs):
X = cf.input_sheet_slice.submatrix(projection.src.activity)
projection.activity.flat[i] = single_cf_fn(X,cf.weights)
projection.activity *= projection.strength
def compute_sparse_joint_norm_totals(projlist,active_units_mask=True):
"""
Compute norm_total for each CF in each projection from a group to be
normalized jointly.
"""
# Assumes that all Projections in the list have the same r,c size
assert len(projlist)>=1
joint_sum = np.zeros(projlist[0].dest.shape,dtype=np.float64)
for p in projlist:
if not p.has_norm_total:
p.norm_total *= 0.0
p.weights.CFWeightTotals(p.norm_total)
p.has_norm_total=True
joint_sum = np.add.reduce([proj.norm_total for proj in projlist],dtype=np.float64)
for p in projlist:
p.norm_total = joint_sum.copy()
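# Net effect (sketch): every projection in projlist ends up sharing
# norm_total = the sum of per-unit CF weight totals across the group, so a
# subsequent divisive normalization scales each CF by the joint total
# rather than by its own projection's total alone.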
def CFPOF_DivisiveNormalizeL1_Sparse(projection):
"""
Sparse CF Projection output function applying L1 divisive normalization
to individual CFs.
"""
if not projection.has_norm_total:
projection.norm_total *= 0.0
projection.weights.CFWeightTotals(projection.norm_total)
projection.weights.DivisiveNormalizeL1(projection.norm_total)
projection.has_norm_total = False
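# For reference, a dense single-CF equivalent of the L1 normalization above
# (a sketch; the real work happens inside the sparse extension):
def _dense_divisive_normalize_l1_sketch(w):
    norm = np.sum(np.abs(w))
    if norm != 0.0:
        w /= norm
    return w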
def CFPLF_Hebbian_Sparse(projection):
"""
Sparse CF Projection learning function applying Hebbian learning
to the weights in a projection.
"""
single_conn_lr = projection.learning_rate/projection.n_units
projection.norm_total *= 0.0
projection.weights.Hebbian(projection.src.activity,projection.dest.activity,
projection.norm_total,single_conn_lr)
projection.has_norm_total = True
def CFPLF_Hebbian_Sparse_opt(projection):
"""
Sparse CF Projection learning function, which calls an optimized Hebbian
learning function while skipping over inactive units.
"""
single_conn_lr = projection.learning_rate/projection.n_units
projection.norm_total *= 0.0
projection.weights.Hebbian_opt(projection.src.activity,projection.dest.activity,
projection.norm_total,single_conn_lr,projection.initialized)
projection.has_norm_total = True
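# Dense sketch of the per-connection Hebbian step the sparse extension is
# assumed to perform: dw = lr * pre * post (cf. the Hebbian LearningFn).
def _dense_hebbian_sketch(weights, pre_patch, post_activity, single_conn_lr):
    weights += single_conn_lr * post_activity * pre_patch
    return weights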
def CFPRF_DotProduct_Sparse(projection):
"""
Sparse CF Projection response function calculating the dot-product
between incoming activities and CF weights.
"""
projection.weights.DotProduct(projection.strength, projection.input_buffer, projection.activity)
def CFPRF_DotProduct_Sparse_opt(projection):
"""
Sparse CF Projection response function calculating the dot-product
between incoming activities and CF weights. Optimization skips
inactive units if a certain percentage of neurons is inactive.
"""
nnz_ratio = np.count_nonzero(projection.src.activity) / float(projection.src.activity.size)
if nnz_ratio < 0.1:
projection.weights.DotProduct_opt(projection.strength, projection.src.activity, projection.activity)
else:
projection.weights.DotProduct(projection.strength, projection.src.activity, projection.activity)
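# Example of the gate above (hypothetical numbers): a 48x48 source sheet
# with 200 active units gives nnz_ratio = 200/2304 ~= 0.087 < 0.1, so the
# optimized kernel that skips inactive units is chosen.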
class SparseConnectionField(param.Parameterized):
"""
A set of weights on one input Sheet.
Each ConnectionField contributes to the activity of one unit on
the output sheet, and is normally used as part of a Projection
including many other ConnectionFields.
"""
# ALERT: need bounds, more docs
x = param.Number(default=0.0,doc="Sheet X coordinate of CF")
y = param.Number(default=0.0,doc="Sheet Y coordinate of CF")
weights_generator = param.ClassSelector(PatternGenerator,
default=patterngenerator.Constant(),constant=True,doc="""
Generates initial weights values.""")
min_matrix_radius=param.Integer(default=1)
output_fns = param.HookList(default=[],class_=TransferFn,precedence=0.08,doc="""
Optional function(s) to apply to the pattern array after it has been created.
Can be used for normalization, thresholding, etc.""")
# Class attribute to switch to legacy weight generation if False
independent_weight_generation = True
def get_bounds(self,input_sheet=None):
if input_sheet is not None:
return self.input_sheet_slice.compute_bounds(input_sheet)
else:
return self.input_sheet_slice.compute_bounds(self.input_sheet)
def __get_shape_mask(self):
cf_shape = self.projection.cf_shape
bounds = self.projection.bounds_template
xdensity = self.projection.src.xdensity
ydensity = self.projection.src.ydensity
center_r,center_c = self.projection.src.sheet2matrixidx(0,0)
center_x,center_y = self.projection.src.matrixidx2sheet(center_r,center_c)
cf_mask = cf_shape(x=center_x,y=center_y,bounds=bounds,xdensity=xdensity,ydensity=ydensity)
return cf_mask
shape_mask = property(__get_shape_mask)
def __get_norm_total(self):
return self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]]
def __set_norm_total(self,new_norm_total):
self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = new_norm_total
def __del_norm_total(self):
self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = 0.0
norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total)
def __get_mask(self):
x1,x2,y1,y2 = self.input_sheet_slice.tolist()
mask = np.zeros((x2-x1,y2-y1),dtype=np.bool)
inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
nz_flat = self.projection.weights[inds,self.oned_idx].toarray()
nz_inds = nz_flat.reshape(x2-x1,y2-y1).nonzero()
mask[nz_inds] = True
return mask
mask = property(__get_mask,
"""
The mask property returns an array of bools representing the
zero weights in the CF weights array.
It is useful when applying additive functions on the weights
array, to ensure zero values are not accidentally overwritten.
The mask cannot be changed via the property, only by changing
the weights directly.
""")
def __get_weights(self):
"""
get_weights accesses the sparse CF matrix and returns the CF
in dense form.
"""
x1,x2,y1,y2 = self.src_slice
inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
return self.projection.weights[inds,self.oned_idx].toarray().reshape(x2-x1,y2-y1)
def __set_weights(self,arr):
"""
Takes an input array, which has to match the CF shape, and
creates an mgrid of the appropriate size, adds the proper
offsets and passes the values and indices to the sparse matrix
representation.
"""
x1,x2,y1,y2 = self.src_slice
(dim1,dim2) = arr.shape
assert (dim1,dim2) == (x2-x1,y2-y1), "Array does not match CF shape."
(x,y) = np.mgrid[0:dim1,0:dim2] # Create mgrid of CF size
x_ind = np.array(x)+x1; y_ind = np.array(y) + y1; # Add slice offsets
row_inds = np.ravel_multi_index((x_ind,y_ind),self.projection.src.shape).flatten().astype(np.int32)
col_inds = np.array([self.oned_idx]*len(row_inds),dtype=np.int32)
self.projection.weights.put(arr[x,y].flatten(),row_inds,col_inds)
weights = property(__get_weights,__set_weights)
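# Example (sketch): for src_slice == (2, 5, 3, 6) the CF is 3x3; row_inds
# enumerate the flattened source-sheet indices of that patch, col_inds
# repeat this CF's oned_idx, and put() scatters the dense CF into a single
# column of the sparse weights matrix.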
def __init__(self,template,input_sheet,projection,label=None,**params):
"""
Initializes the CF object and stores meta information about the CF's
shape and position in the SparseCFProjection to allow for easier
initialization.
"""
super(SparseConnectionField,self).__init__(**params)
self.input_sheet = input_sheet
self.projection = projection
self.label = label
self.matrix_idx = self.projection.dest.sheet2matrixidx(self.x,self.y)
self.oned_idx = self.matrix_idx[0] * self.projection.dest.shape[1] + self.matrix_idx[1]
template = copy(template)
if not isinstance(template,Slice):
template = Slice(template,self.input_sheet,force_odd=True,
min_matrix_radius=self.min_matrix_radius)
self.weights_slice = self._create_input_sheet_slice(template)
self.src_slice = tuple(self.input_sheet_slice.tolist())
def _init_weights(self,mask_template):
if not hasattr(mask_template,'view'):
mask = _create_mask(mask_template,
self.weights_slice.compute_bounds(
self.input_sheet),
self.input_sheet,True,0.5)
mask = self.weights_slice.submatrix(mask_template)
mask = np.array(mask,copy=1)
pattern_params = dict(x=self.x,y=self.y,
bounds=self.get_bounds(self.input_sheet),
xdensity=self.input_sheet.xdensity,
ydensity=self.input_sheet.ydensity,
mask=mask)
controlled_weights = (param.Dynamic.time_dependent
and isinstance(param.Dynamic.time_fn,
param.Time)
and self.independent_weight_generation)
if controlled_weights:
with param.Dynamic.time_fn as t:
t(0) # Initialize at time zero.
# Controls random streams
label = '' if self.label is None else self.label
name = "%s_CF (%.5f, %.5f)" % (label, self.x, self.y)
w = self.weights_generator(**dict(pattern_params,
name=name))
else:
w = self.weights_generator(**pattern_params)
w = w.astype(sparse_type)
for of in self.output_fns:
of(w)
return w
def _create_input_sheet_slice(self,template):
"""
Create the input_sheet_slice, which provides the appropriate
Slice for this CF on the input_sheet (as well as providing
this CF's exact bounds).
Also creates the weights_slice, which provides the Slice for
this weights matrix (in case it must be cropped at an edge).
"""
# copy required because the template gets modified here but
# needs to be used again
input_sheet_slice = copy(template)
input_sheet_slice.positionedcrop(self.x,self.y,self.input_sheet)
input_sheet_slice.crop_to_sheet(self.input_sheet)
# weights matrix cannot have a zero-sized dimension (could
# happen at this stage because of cropping)
nrows,ncols = input_sheet_slice.shape_on_sheet()
if nrows<1 or ncols<1:
raise NullCFError(self.x,self.y,self.input_sheet,nrows,ncols)
self.input_sheet_slice = input_sheet_slice
# not copied because we don't use again
template.positionlesscrop(self.x,self.y,self.input_sheet)
return template
def get_input_matrix(self, activity):
return self.input_sheet_slice.submatrix(activity)
class SparseCFProjection(CFProjection):
"""
A projection composed of SparseConnectionFields from a Sheet into
a ProjectionSheet.
SparseCFProjection computes its activity using a response_fn which
can either be an optimized function implemented as part of the
sparse matrix class or an unoptimized function, which requests the
weights in dense format. The initial contents of the
SparseConnectionFields mapping from the input Sheet into the
target ProjectionSheet are controlled by the weights_generator,
cf_shape, and weights_output_fn parameters, while the location of
the ConnectionField is controlled by the coord_mapper parameter.
Any subclass has to implement the interface activate(self) that
computes the response from the input and stores it in the activity
array.
"""
cf_type = param.Parameter(default=SparseConnectionField,doc="""
Type of ConnectionField to use when creating individual CFs.""")
learning_fn = param.Callable(default=CFPLF_Hebbian_Sparse,doc="""
Function for computing changes to the weights based on one activation step.""")
response_fn = param.Callable(default=CFPRF_DotProduct_Sparse,doc="""
Function for computing the Projection response to an input pattern.""")
weights_output_fns = param.HookList(default=[CFPOF_DivisiveNormalizeL1_Sparse],doc="""
Functions applied to each CF after learning.""")
initialized = param.Boolean(default=False)
def __init__(self,initialize_cfs=True,**params):
"""
Initialize the Projection with a set of cf_type objects
(typically SparseConnectionFields), each located at the
location in the source sheet corresponding to the unit in the
target sheet. The cf_type objects are stored in the 'cfs'
array.
The nominal_bounds_template specified may be altered: the
bounds must be fitted to the Sheet's matrix, and the weights
matrix must have odd dimensions. These altered bounds are
passed to the individual connection fields.
A mask for the weights matrix is constructed. The shape is
specified by cf_shape; the size defaults to the size
of the nominal_bounds_template.
"""
super(CFProjection,self).__init__(**params)
self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
# get the actual bounds_template by adjusting a copy of the
# nominal_bounds_template to ensure an odd slice, and to be
# cropped to sheet if necessary
self._slice_template = Slice(copy(self.nominal_bounds_template),
self.src,force_odd=True,
min_matrix_radius=self.min_matrix_radius)
self.bounds_template = self._slice_template.compute_bounds(self.src)
self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
self.n_units = self._calc_n_units()
self.activity = np.array(self.dest.activity)
self.norm_total = np.array(self.dest.activity,dtype=np.float64)
self.has_norm_total = False
if initialize_cfs:
self._create_cfs()
if self.apply_output_fns_init:
self.apply_learn_output_fns()
self.input_buffer = None
def __getstate__(self):
"""
Method to support pickling of sparse weights object.
"""
state_dict = self.__dict__.copy()
state_dict['triplets'] = state_dict['weights'].getTriplets()
state_dict['weight_shape'] = (self.src.activity.shape,self.dest.activity.shape)
del state_dict['weights']
return state_dict
def __setstate__(self,state_dict):
"""
Method to support unpickling of sparse weights object.
"""
self.__dict__.update(state_dict)
self.weights = sparse.csarray_float(self.weight_shape[0],self.weight_shape[1])
rowInds, colInds, values = self.triplets
self.weights.setTriplets(rowInds,colInds,values)
del self.triplets
del self.weight_shape
def _create_cfs(self):
"""
Creates the CF objects, initializing the weights one by one
and adding them to the sparse weights object in chunks.
"""
vectorized_create_cf = simple_vectorize(self._create_cf)
self.cfs = vectorized_create_cf(*self._generate_coords())
self.flatcfs = list(self.cfs.flat)
self.weights = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
cf_x,cf_y = self.dest.activity.shape
src_x,src_y = self.src.activity.shape
y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
val_array = np.zeros((src_x*src_y*cf_y),dtype=np.float32)
# Iterate over the CFs
for x in range(cf_x):
temp_sparse = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
idx = 0
for y in range(cf_y):
x1,x2,y1,y2 = self.cfs[x][y].input_sheet_slice.tolist()
if self.same_cf_shape_for_all_cfs:
mask_template = self.mask_template
else:
mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
weights = self.cfs[x][y]._init_weights(mask_template)
cn_x,cn_y = weights.shape
y_val = x * cf_y + y
for cnx in range(cn_x):
val_array[idx:idx+cn_y] = weights[cnx,:]
x_val = (x1+cnx) * src_y + y1
x_array[idx:idx+cn_y] = range(x_val,x_val+cn_y)
y_array[idx:idx+cn_y] = y_val
idx += cn_y
nnz_idx = val_array.nonzero()
temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
self.weights += temp_sparse
x_array *= 0; y_array *= 0; val_array *= 0.0
del temp_sparse
self.weights.compress()
self.debug("Sparse projection %r loaded" % self.name)
def _create_cf(self,x,y):
"""
Create a ConnectionField at x,y in the src sheet.
"""
label = self.hash_format.format(name=self.name,
src=self.src.name,
dest=self.dest.name)
try:
CF = self.cf_type(template=self._slice_template,
projection=self,input_sheet=self.src,x=x,y=y,
weights_generator=self.weights_generator,
min_matrix_radius=self.min_matrix_radius,
label=label)
except NullCFError:
if self.allow_null_cfs:
CF = None
else:
raise
return CF
def get_sheet_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def get_active_units_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def activate(self,input_activity):
"""Activate using the specified response_fn and output_fn."""
if self.input_fns:
input_activity = input_activity.copy()
for iaf in self.input_fns:
iaf(input_activity)
self.input_buffer = input_activity
self.activity *=0.0
self.response_fn(self)
for of in self.output_fns:
of(self.activity)
def learn(self):
"""
For a SparseCFProjection, learn consists of calling the learning_fn.
"""
# Learning is performed if the input_buffer has already been set,
# i.e. there is an input to the Projection.
if self.input_buffer is not None:
self.learning_fn(self)
def apply_learn_output_fns(self,active_units_mask=True):
"""
Apply the weights_output_fns to each unit.
"""
for of in self.weights_output_fns: of(self)
def n_bytes(self):
"""
Estimates the size on the basis of the number of non-zeros in the
sparse matrix, assuming indices and values are stored using
32-bit integers and floats respectively.
"""
return self.n_conns() * (3 * 4)
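# i.e. 4 bytes each for the row index, column index and value of every
# stored nonzero, ignoring container overhead.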
def n_conns(self):
"""
Returns number of nonzero weights.
"""
return self.weights.getnnz()
if not use_sparse:
print "WARNING: Sparse component could not be imported, replacing SparseCFProjection with regular CFProjection"
def SparseCFProjection(*args, **kwargs): # pyflakes:ignore (optimized version provided)
return CFProjection(*args,**kwargs)
sparse_components = [CFSPLF_Plugin,
CFSPOF_Plugin,
CFSPOF_Prune,
CFSPOF_SproutRetract,
CFSPRF_Plugin,
compute_sparse_joint_norm_totals,
CFPOF_DivisiveNormalizeL1_Sparse,
CFPLF_Hebbian_Sparse,
CFPLF_Hebbian_Sparse_opt,
CFPRF_DotProduct_Sparse,
CFPRF_DotProduct_Sparse_opt,
SparseConnectionField,
SparseCFProjection]
__all__ = sparse_components
| mjabri/topographica | topo/sparse/sparsecf.py | Python | bsd-3-clause | 34,427 | ["Gaussian"] | 529e51960817ad12bdc34595063d42015f9dca03acbeccaeea7fcd9a60319611 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._searchfathername import SearchFatherName
from ._searchmothername import SearchMotherName
from ._searchchildname import SearchChildName
from ._regexpfathername import RegExpFatherName
from ._regexpmothername import RegExpMotherName
from ._regexpchildname import RegExpChildName
from ._hasreltype import HasRelType
from ._allfamilies import AllFamilies
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._haslds import HasLDS
from ._regexpidof import RegExpIdOf
from ._hasnote import HasNote
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._hasreferencecountof import HasReferenceCountOf
from ._hascitation import HasCitation
from ._familyprivate import FamilyPrivate
from ._hasattribute import HasAttribute
from ._hasevent import HasEvent
from ._isbookmarked import IsBookmarked
from ._matchesfilter import MatchesFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._fatherhasnameof import FatherHasNameOf
from ._fatherhasidof import FatherHasIdOf
from ._motherhasnameof import MotherHasNameOf
from ._motherhasidof import MotherHasIdOf
from ._childhasnameof import ChildHasNameOf
from ._childhasidof import ChildHasIdOf
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastwins import HasTwins
editor_rule_list = [
AllFamilies,
HasRelType,
HasGallery,
HasIdOf,
HasLDS,
HasNote,
RegExpIdOf,
HasNoteRegexp,
HasReferenceCountOf,
HasSourceCount,
HasSourceOf,
HasCitation,
FamilyPrivate,
HasEvent,
HasAttribute,
IsBookmarked,
MatchesFilter,
MatchesSourceConfidence,
FatherHasNameOf,
FatherHasIdOf,
MotherHasNameOf,
MotherHasIdOf,
ChildHasNameOf,
ChildHasIdOf,
ChangedSince,
HasTag,
HasTwins,
]
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/family/__init__.py | Python | gpl-2.0 | 2,872 | ["Brian"] | fd3694fc7d60453c8e17e1a5f6aa769a56663f906027ff9802b67b44581f04c1 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sources.starfinder Contains the StarFinder class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import astronomical modules
from astropy.units import Unit
from astropy.convolution import Gaussian2DKernel
# Import the relevant PTS classes and modules
from ..basics.vector import Extent
from ..basics.region import Region
from ..basics.geometry import Coordinate, Circle, Ellipse
from ..basics.skygeometry import SkyCoordinate
from ..core.frame import Frame
from ..core.source import Source
from ..object.star import Star
from ..tools import statistics, fitting
from ...core.basics.configurable import OldConfigurable
from ...core.tools import tables
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
from ..tools import plotting
# -----------------------------------------------------------------
class StarFinder(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
"""
# Call the constructor of the base class
super(StarFinder, self).__init__(config, "magic")
# -- Attributes --
# Initialize an empty list for the stars
self.stars = []
# The image frame
self.frame = None
# The mask covering objects that require special attention
self.special_mask = None
# The mask of pixels that should be ignored
self.ignore_mask = None
# The mask of bad pixels
self.bad_mask = None
# The stellar catalog
self.catalog = None
# The statistics table
self.statistics = None
# Reference to the galaxy finder
self.galaxy_finder = None
# The segmentation map of stars
self.segments = None
# The regions of stars and saturation sources
self.star_region = None
self.saturation_region = None
# -----------------------------------------------------------------
def run(self, frame, galaxy_finder, catalog, special=None, ignore=None, bad=None):
"""
This function ...
:param frame:
:param galaxy_finder:
:param catalog:
:param special:
:param ignore:
:param bad:
"""
# 1. Call the setup function
self.setup(frame, galaxy_finder, catalog, special, ignore, bad)
# 2. Find the stars
self.find_stars()
# 3. Create the star region
self.create_star_region()
# 4. If requested, find and remove saturated stars
if self.config.find_saturation:
self.find_saturation()
self.create_saturation_region()
# 5. Set the statistics
self.set_statistics()
# 6. Create the segmentation map
self.create_segments()
# -----------------------------------------------------------------
def setup(self, frame, galaxy_finder, catalog, special_mask=None, ignore_mask=None, bad_mask=None):
"""
This function ...
:param frame:
:param galaxy_finder:
:param catalog:
:param special_mask:
:param ignore_mask:
:param bad_mask:
"""
# Call the setup function of the base class
super(StarFinder, self).setup()
# Make a local reference to the frame
self.frame = frame
self.catalog = catalog
# Special and ignore masks
self.special_mask = special_mask
self.ignore_mask = ignore_mask
self.bad_mask = bad_mask
# Make a local reference to the galaxy finder
self.galaxy_finder = galaxy_finder
# Create an empty frame for the segments
self.segments = Frame.zeros_like(self.frame)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the star finder ...")
# Clear the list of stars
self.stars = []
# Clear the image frame
self.frame = None
# -----------------------------------------------------------------
def find_stars(self):
"""
This function ...
:return:
"""
# Load the stars from the stellar catalog
self.load_stars()
# For each star, find a corresponding source in the image
self.find_sources()
# Fit analytical models to the stars
if not self.config.use_frame_fwhm or self.frame.fwhm is None: self.fit_stars()
# Set the final sources
self.adjust_sources()
# -----------------------------------------------------------------
def load_stars(self):
"""
This function creates the star list from the star catalog.
:return:
"""
# Inform the user
log.info("Loading the stars from the catalog ...")
# Track which galaxies have already been encountered (TODO: change this
# to use an 'encountered' list as well)
encountered_galaxies = [False] * len(self.galaxy_finder.galaxies)
galaxy_pixel_position_list = []
galaxy_type_list = []
for galaxy in self.galaxy_finder.galaxies:
galaxy_pixel_position_list.append(galaxy.pixel_position(self.frame.wcs))
if galaxy.principal: galaxy_type_list.append("principal")
elif galaxy.companion: galaxy_type_list.append("companion")
else: galaxy_type_list.append("other")
# Keep track of the distances between the stars and the galaxies
distances = []
on_galaxy_column = [False] * len(self.catalog)
# Create the list of stars
for i in range(len(self.catalog)):
# Get the star properties
catalog = self.catalog["Catalog"][i]
star_id = self.catalog["Id"][i]
ra = self.catalog["Right ascension"][i]
dec = self.catalog["Declination"][i]
ra_error = self.catalog["Right ascension error"][i] * Unit("mas")
dec_error = self.catalog["Declination error"][i] * Unit("mas")
confidence_level = self.catalog["Confidence level"][i]
# Check for which bands magnitudes are defined
magnitudes = {}
magnitude_errors = {}
for name in self.catalog.colnames:
if "magnitude" in name:
band = name.split(" magnitude")[0]
magnitudes[band] = self.catalog[name][i] * Unit("mag")
magnitude_errors[band] = self.catalog[name + " error"][i] * Unit("mag")
# Create a sky coordinate for the star position
position = SkyCoordinate(ra=ra, dec=dec, unit="deg", frame="fk5")
# If the star falls outside of the frame, skip it
if not self.frame.contains(position): continue
# Create a star object
star = Star(i, catalog=catalog, id=star_id, position=position, ra_error=ra_error,
dec_error=dec_error, magnitudes=magnitudes, magnitude_errors=magnitude_errors)
# Get the position of the star in pixel coordinates
pixel_position = star.pixel_position(self.frame.wcs)
# Check whether 'special'
special = self.special_mask.masks(pixel_position) if self.special_mask is not None else False
cutout = self.frame.cutout_around(pixel_position, 15) if special else None
# Check whether 'ignore'
ignore = self.ignore_mask.masks(pixel_position) if self.ignore_mask is not None else False
# Set attributes based on masks (special and ignore)
star.special = special
star.ignore = ignore
special = False
# -- Checking for foreground or surroundings of galaxy --
if "On galaxy" in self.catalog.colnames: star_on_galaxy = self.catalog["On galaxy"][i]
else:
# Check whether this star is on top of the galaxy, and label it so (by default, star.on_galaxy is False)
if (self.galaxy_finder is not None) and (self.galaxy_finder.principal.source is not None):
star_on_galaxy = self.galaxy_finder.principal.contains(pixel_position)
else: star_on_galaxy = False
on_galaxy_column[i] = star_on_galaxy
if special: plotting.plot_box(cutout, title="On galaxy" if star_on_galaxy else "Not on galaxy")
# -- Cross-referencing with the galaxies in the frame --
# Loop over all galaxies to cross-reference
if self.config.fetching.cross_reference_with_galaxies and star_on_galaxy:
# If a match is found with one of the galaxies, skip this star
if matches_galaxy_position(pixel_position, galaxy_pixel_position_list, galaxy_type_list, encountered_galaxies, self.config.fetching.min_distance_from_galaxy, distances):
if special: plotting.plot_box(cutout, "Matches galaxy position (distance < " + str(self.config.fetching.min_distance_from_galaxy) + ")")
continue
# Set other attributes
star.on_galaxy = star_on_galaxy
star.confidence_level = confidence_level
# Enable track record if requested
if self.config.track_record: star.enable_track_record()
# If the input mask masks this star's position, skip it (don't add it to the list of stars)
#if "bad" in self.image.masks and self.image.masks.bad.masks(pixel_position): continue
if self.bad_mask is not None and self.bad_mask.masks(pixel_position):
if special: plotting.plot_box(cutout, "Covered by bad mask")
continue
# Don't add stars which are indicated as 'not stars'
if self.config.manual_indices.not_stars is not None and i in self.config.manual_indices.not_stars:
if special: plotting.plot_box(cutout, "Indicated as 'not a star'")
continue
# Add the star to the list
self.stars.append(star)
# Add the 'on_galaxy' column to the catalog if necessary
if "On galaxy" not in self.catalog.colnames: self.catalog["On galaxy"] = on_galaxy_column
# Inform the user
if self.config.fetching.cross_reference_with_galaxies: log.debug("10 smallest distances 'star - galaxy': " + ', '.join("{0:.2f}".format(distance) for distance in sorted(distances)[:10]))
# -----------------------------------------------------------------
def find_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Looking for sources near the star positions ...")
# Loop over all stars in the list
for star in self.stars:
# If this sky object should be ignored, skip it
if star.ignore: continue
# Find a source
try: star.find_source(self.frame, self.config.detection)
except Exception as e:
import traceback
log.error("Error when finding source")
print(type(e))
print(e)
traceback.print_exc()
if self.config.plot_track_record_if_exception:
if star.has_track_record: star.track_record.plot()
else: log.warning("Track record is not enabled")
log.error("Continuing with next source ...")
# Inform the user
log.debug("Found a source for {0} out of {1} objects ({2:.2f}%)".format(self.have_source, len(self.stars), self.have_source / len(self.stars) * 100.0))
# -----------------------------------------------------------------
def fit_stars(self):
"""
This function ...
"""
# Inform the user
log.info("Fitting analytical profiles to the sources ...")
# Loop over all stars in the list
for star in self.stars:
# If this star should be ignored, skip it
if star.ignore: continue
# Check if the star has a source (has been detected)
if not star.has_source and self.config.fitting.fit_if_undetected:
# Get the parameters of the circle
ellipse = star.ellipse(self.frame.wcs, self.frame.average_pixelscale, self.config.fitting.initial_radius)
# Create a source object
source = Source.from_ellipse(self.frame, ellipse, self.config.fitting.background_outer_factor)
else: source = None
# Find a model
if star.has_source or source is not None: star.fit_model(self.config.fitting, source)
# If requested, perform sigma-clipping to the list of FWHM's to filter out outliers
if self.config.fitting.sigma_clip_fwhms:
mean, median, stddev = statistics.sigma_clipped_statistics(self.fwhms_pix, self.config.fitting.fwhm_sigma_level)
lower = median - self.config.fitting.fwhm_sigma_level * stddev
upper = median + self.config.fitting.fwhm_sigma_level * stddev
# Loop over all stars for which a model was found
for star in self.stars:
# Ignore stars without model
if not star.has_model: continue
# Remove the model if its FWHM is clipped out
if star.fwhm > upper or star.fwhm < lower: star.model = None
# Inform the user
log.debug("Found a model for {0} out of {1} stars with source ({2:.2f}%)".format(self.have_model, self.have_source, self.have_model/self.have_source*100.0))
# -----------------------------------------------------------------
def remove_stars(self):
"""
This function ...
"""
# Inform the user
log.info("Removing the stars from the frame ...")
# Calculate the default FWHM, for the stars for which a model was not found
default_fwhm = self.fwhm_pix
# Inform the user
log.debug("Default FWHM used when star could not be fitted: {0:.2f} pixels".format(default_fwhm))
# Loop over all stars in the list
for star in self.stars:
# If this star should be ignored, skip it
if star.ignore: continue
# Remove the star in the frame
star.remove(self.frame, self.mask, self.config.removal, default_fwhm)
# -----------------------------------------------------------------
def adjust_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adjusting the star sources to the same sigma level ...")
# Calculate the default FWHM, for the stars for which a model was not found
default_fwhm = self.fwhm_pix
# Loop over all stars
for star in self.stars:
# If this star should be ignored, skip it
if star.ignore: continue
# If this star does not have a source, skip it
if not star.has_source: continue
# Create a source for the desired sigma level and outer factor
star.source = star.source_at_sigma_level(self.frame, default_fwhm, self.config.source_psf_sigma_level, self.config.source_outer_factor)
# -----------------------------------------------------------------
def find_saturation(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Looking for saturated stars ...")
# Check whether sources are found
with_source = self.have_source
if with_source == 0: raise RuntimeError("Not a single source was found")
# Inform the user on the number of stars that have a source
log.debug("Number of stars with source = " + str(with_source))
# Calculate the default FWHM, for the stars for which a model was not found
default_fwhm = self.fwhm_pix
# Set the number of stars where saturation was removed to zero initially
success = 0
star_mask = self.star_region.to_mask(self.frame.xsize, self.frame.ysize)
if self.config.saturation.only_brightest:
fluxes = sorted(self.get_fluxes(without_background=True))
# Percentage method
if self.config.saturation.brightest_method == "percentage":
# Get the number of fluxes lower than the percentage of highest fluxes
percentage = self.config.saturation.brightest_level
fraction = 0.01 * percentage
count_before = int((1.0-fraction)*len(fluxes))
# Determine the flux threshold
flux_threshold = fluxes[count_before-1]
# Sigma clipping method
elif self.config.saturation.brightest_method == "sigma clipping":
# Determine the sigma level
sigma_level = self.config.saturation.brightest_level
# Determine the flux threshold
flux_threshold = statistics.cutoff(fluxes, "sigma_clip", sigma_level)
# Invalid option
else: raise ValueError("Brightest method should be 'percentage' or 'sigma clipping'")
else: flux_threshold = None
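# Worked example of the percentage method (hypothetical numbers): with
# brightest_level=10 and 200 measured fluxes, fraction=0.1 and
# count_before=int(0.9*200)=180, so flux_threshold=fluxes[179] and only
# roughly the brightest 10% of stars are checked for saturation below.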
# Loop over all stars
for star in self.stars:
# If this star should be ignored, skip it
if star.ignore: continue
# If a flux threshold is defined
if flux_threshold is not None:
# No source, skip right away
if not star.has_source: continue
# Determine the flux of this star
if not star.source.has_background: star.source.estimate_background()
flux = star.get_flux(without_background=True)
# Skip this star if its flux is lower than the threshold
if flux < flux_threshold: continue
# If a model was not found for this star, skip it unless the remove_if_not_fitted flag is enabled
if not star.has_model and not self.config.saturation.remove_if_not_fitted: continue
if star.has_model: assert star.has_source
# Note: DustPedia stars will always get a 'source' during removal (with star.source_at_sigma_level) so star.has_source will already pass
# If a source was not found for this star, skip it unless the remove_if_undetected flag is enabled
if not star.has_source and not self.config.saturation.remove_if_undetected: continue
# Find a saturation source and remove it from the frame
star.find_saturation(self.frame, self.config.saturation, default_fwhm, star_mask)
success += star.has_saturation
# Inform the user
log.debug("Found saturation in " + str(success) + " out of " + str(self.have_source) + " stars with source ({0:.2f}%)".format(success / self.have_source * 100.0))
# -----------------------------------------------------------------
def create_star_region(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating star region ...")
# Initialize the region
self.star_region = Region()
# Calculate the default FWHM (calculated based on fitted stars)
default_fwhm = self.fwhm_pix
# Loop over all stars
for star in self.stars:
# Get the center in pixel coordinates
center = star.pixel_position(self.frame.wcs)
# Determine the color, based on the detection level
if star.has_model: color = "blue"
elif star.has_source: color = "green"
else: color = "red"
# Determine the FWHM
fwhm = default_fwhm if not star.has_model else star.fwhm
# Calculate the radius in pixels
radius = fwhm * statistics.fwhm_to_sigma * self.config.source_psf_sigma_level
# Convert the star index to a string
text = str(star.index)
# Create meta information
meta = {"color": color, "text": text}
# Create the shape and add it to the region
shape = Circle(center, radius, meta=meta)
self.star_region.append(shape)
# Add a position for the peak position
if star.has_source and star.source.has_peak:
# Create meta information for the position
meta = {"point": "x"}
# Create the position and add it to the region
position = Coordinate(star.source.peak.x, star.source.peak.y, meta=meta)
self.star_region.append(position)
# -----------------------------------------------------------------
def create_saturation_region(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating saturation region ...")
# Initialize the region
self.saturation_region = Region()
# Loop over all stars
for star in self.stars:
# Skip stars without saturation
if not star.has_saturation: continue
# Convert the star index to a string
text = str(star.index)
# Get aperture properties
center = star.contour.center
major = star.contour.major
minor = star.contour.minor
angle = star.contour.angle.degree
radius = Extent(major, minor)
# Create meta information
meta = {"color": "white", "text": text}
# Create the ellipse and add it to the region
ellipse = Ellipse(center, radius, angle, meta=meta)
self.saturation_region.append(ellipse)
# -----------------------------------------------------------------
def create_segments(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the segmentation map for the stars ...")
# Loop over all stars
for star in self.stars:
# Stars with saturation
if star.has_saturation:
# Add the saturation segment to the segmentation map
self.segments[star.saturation.y_slice, star.saturation.x_slice][star.saturation.mask] = star.index
# Stars without saturation
else:
# Skip stars without a source
if not star.has_source: continue
# Add the star segment to the segmentation map
self.segments[star.source.y_slice, star.source.x_slice][star.source.mask] = star.index
# -----------------------------------------------------------------
def write_cutouts(self):
"""
This function ...
:return:
"""
sigma_level = 3.0
outer_factor = 1.5
method = "polynomial"
shape = Extent(21, 21)
# Determine the full path to the cutouts directory
directory_path = self.full_output_path(self.config.writing.cutouts_path)
# Inform the user
log.info("Writing cutout boxes to " + directory_path + " ...")
# Calculate the default FWHM based on the stars that could be fitted
default_fwhm = self.fwhm_pix
# Loop over all stars
for star in self.stars:
# -- Saturation sources ---
# Check if saturation has been detected for this star
if star.has_saturation:
# Determine the path
path = fs.join(directory_path, "saturation_" + str(star.index) + ".fits")
# Save the saturation source as a FITS file
star.saturation.save(path, origin=self.name)
# -- PSF sources ---
# Check if a model has been found for this star
if star.has_model:
# Determine the path
path = fs.join(directory_path, "star-fitted_" + str(star.index) + ".fits")
# Create source
source = star.source_at_sigma_level(self.original_frame, default_fwhm, sigma_level, outer_factor, use_default_fwhm=True, shape=shape)
# Estimate the background
sigma_clip = not star.on_galaxy
source.estimate_background(method, sigma_clip)
# Save the source as a FITS file
source.save(path, origin=self.name)
# Check if a source was found for this star
elif star.has_source:
# Determine the path
path = fs.join(directory_path, "star-detected_" + str(star.index) + ".fits")
# Create source
source = star.source_at_sigma_level(self.original_frame, default_fwhm, sigma_level, outer_factor, use_default_fwhm=True, shape=shape)
# Estimate the background
sigma_clip = not star.on_galaxy
source.estimate_background(method, sigma_clip)
# Save the source as a FITS file
source.save(path, origin=self.name)
# If no source was found for this star
else:
# Determine the path
path = fs.join(directory_path, "star-undetected_" + str(star.index) + ".fits")
# Create a source for the desired sigma level and outer factor
source = star.source_at_sigma_level(self.original_frame, default_fwhm, sigma_level, outer_factor, use_default_fwhm=True, shape=shape)
# Estimate the background
sigma_clip = not star.on_galaxy
source.estimate_background(method, sigma_clip)
# Save the cutout as a FITS file
source.save(path, origin=self.name)
# -----------------------------------------------------------------
@property
def positions(self):
"""
This function ...
:return:
"""
# Initialize a list to contain the object positions
positions = []
# Loop over the stars
for skyobject in self.stars:
# Calculate the pixel coordinate in the frame and add it to the list
positions.append(skyobject.pixel_position(self.frame.wcs))
# Return the list
return positions
# -----------------------------------------------------------------
@property
def have_source(self):
"""
This function ...
:return:
"""
count = 0
for star in self.stars: count += star.has_source
return count
# -----------------------------------------------------------------
@property
def have_model(self):
"""
This function ...
:return:
"""
count = 0
for star in self.stars: count += star.has_model
return count
# -----------------------------------------------------------------
@property
def have_saturation(self):
"""
This function ...
:return:
"""
count = 0
for star in self.stars: count += star.has_saturation
return count
# -----------------------------------------------------------------
@property
def have_contour(self):
"""
This function ...
:return:
"""
count = 0
for star in self.stars: count += star.has_contour
return count
# -----------------------------------------------------------------
@property
def fwhms(self):
"""
This function ...
:return:
"""
# Initialize a list to contain the fwhm of the fitted stars
fwhms = []
# Loop over all stars
for star in self.stars:
# If the star contains a model, add the fwhm of that model to the list
if star.has_model:
fwhm_pix = star.fwhm * Unit("pix")
fwhm_arcsec = fwhm_pix * self.frame.average_pixelscale.to("arcsec/pix")
fwhms.append(fwhm_arcsec)
# Return the list
return fwhms
# -----------------------------------------------------------------
@property
def fwhms_pix(self):
"""
This function ...
:return:
"""
return [(fwhm / self.frame.average_pixelscale.to("arcsec/pix")).to("pix").value for fwhm in self.fwhms]
# -----------------------------------------------------------------
@property
def fluxes(self):
"""
This function ...
:return:
"""
# Initialize a list to contain the fluxes of the stars
fluxes = []
# Loop over all stars
for star in self.stars:
# If the star contains a source and the background of this source has been subtracted, calculate the flux
if star.has_source and star.source.has_background:
# Add the flux to the list
fluxes.append(star.flux)
# Return the list
return fluxes
# -----------------------------------------------------------------
def get_fluxes(self, without_background=False):
"""
This function ...
:param without_background:
:return:
"""
# Initialize a list to contain the fluxes of the stars
fluxes = []
# Loop over all stars
for star in self.stars:
# If the star contains a source and the background of this source has been subtracted, calculate the flux
if star.has_source and star.source.has_background:
# Add the flux to the list
fluxes.append(star.get_flux(without_background))
# Return the list
return fluxes
# -----------------------------------------------------------------
@property
def amplitude_differences(self):
"""
This function ...
:return:
"""
# Initialize
differences = []
# Loop over all stars
for star in self.stars:
# If the star was not fitted, skip it
if not star.has_model: continue
# Determine the amplitude and the position of the center of the model
amplitude_model = star.model.amplitude
center = star.source.cutout.rel_position(fitting.center(star.model))
# Convert into integers
x = int(round(center.x))
y = int(round(center.y))
# Calculate the value of the source at the model's center position
amplitude_source = star.source.subtracted[y, x]
# Calculate the difference of the amplitudes
difference = abs(amplitude_model - amplitude_source)
rel_difference = difference / amplitude_source
# Add the relative difference to the list
differences.append(rel_difference)
# Return the list of differences
return differences
# -----------------------------------------------------------------
@property
def fwhm(self):
"""
This function ...
:return:
"""
# If requested, always use the FWHM defined by the frame object
if self.config.use_frame_fwhm and self.frame.fwhm is not None: return self.frame.fwhm.to("arcsec")
# If the list of FWHM values is empty (the stars were not fitted yet), return None
fwhms = self.fwhms
if len(fwhms) == 0: return None
fwhm_values = [fwhm.to("arcsec").value for fwhm in fwhms]
# Determine the default FWHM and return it
if self.config.fwhm.measure == "max":
return max(fwhm_values) * Unit("arcsec") * self.config.fwhm.scale_factor
elif self.config.fwhm.measure == "mean":
return np.mean(fwhm_values) * Unit("arcsec") * self.config.fwhm.scale_factor
elif self.config.fwhm.measure == "median":
return np.median(fwhm_values) * Unit("arcsec") * self.config.fwhm.scale_factor
else: raise ValueError("Unknown measure for determining the default FWHM")
# -----------------------------------------------------------------
@property
def fwhm_pix(self):
"""
This function ...
:return:
"""
return (self.fwhm / self.frame.average_pixelscale.to("arcsec/pix")).value
# -----------------------------------------------------------------
@property
def kernel(self):
"""
This function ...
:return:
"""
# Create a Gaussian convolution kernel and return it
sigma = self.fwhm_pix * statistics.fwhm_to_sigma
return Gaussian2DKernel(sigma)
# -----------------------------------------------------------------
def set_statistics(self):
"""
This function ...
:return:
"""
index_column = []
have_source_column = []
have_model_column = []
have_saturation_column = []
# Peak
x_peak_column = []
y_peak_column = []
# Fitting -> FWHM
fwhm_column = []
# Saturation -> aperture
x_centroid_column = []
y_centroid_column = []
a_column = []
b_column = []
angle_column = []
# Ignore
ignore_column = []
# Other
#not_star_column = []
#force_column = []
#not_saturation_column = []
# Loop over all stars
for star in self.stars:
index_column.append(star.index)
have_source_column.append(star.has_source)
have_model_column.append(star.has_model)
have_saturation_column.append(star.has_saturation)
if star.has_source and star.source.has_peak:
x_peak_column.append(star.source.peak.x)
y_peak_column.append(star.source.peak.y)
else:
x_peak_column.append(None)
y_peak_column.append(None)
fwhm_column.append(star.fwhm if star.has_model else None)
if star.has_saturation:
contour_position = star.contour.center
x_centroid_column.append(contour_position.x)
y_centroid_column.append(contour_position.y)
a_column.append(star.contour.major)
b_column.append(star.contour.minor)
angle_column.append(star.contour.angle.degree)
else:
x_centroid_column.append(None)
y_centroid_column.append(None)
a_column.append(None)
b_column.append(None)
angle_column.append(None)
ignore_column.append(star.ignore)
#not_star_column.append()
#force_column.append()
#not_saturation_column.append()
# Create data structure and set column names
data = [index_column, have_source_column, have_model_column, have_saturation_column,
x_peak_column, y_peak_column, fwhm_column, x_centroid_column, y_centroid_column, a_column, b_column,
angle_column, ignore_column]
names = ["Star index", "Detected", "Fitted", "Saturated", "Peak x position", "Peak y position", "FWHM",
"Aperture x centroid", "Aperture y centroid", "Aperture a length", "Aperture b length",
"Aperture angle", "Ignore"]
# Create the statistics table
self.statistics = tables.new(data, names)
# -----------------------------------------------------------------
def matches_galaxy_position(position, position_list, type_list, encountered, min_distances, distances=None):
"""
This function ...
:param position:
:param position_list:
:param type_list:
:param encountered:
:param min_distances:
:param distances:
:return:
"""
for j in range(len(encountered)):
# Ignore already encountered galaxies (another star has already been identified with it)
if encountered[j]: continue
# Calculate the pixel position of the galaxy
galaxy_position = position_list[j]
# Calculate the distance between the star's position and the galaxy's center
difference = galaxy_position - position
distance = difference.norm
# Add the star-galaxy distance to the list of distances
if distances is not None: distances.append(distance)
# The principal galaxy/galaxies
if type_list[j] == "principal":
# Check whether the star-galaxy distance is smaller than a certain threshold
if distance <= min_distances.principal: return True
# Companion galaxies
elif type_list[j] == "companion":
if distance <= min_distances.companion:
# Indicate that the current star has been identified with the galaxy with index j
encountered[j] = True
return True
# All other galaxies in the frame
else:
if distance <= min_distances.other:
# Indicate that the current star has been identified with the galaxy with index j
encountered[j] = True
return True
# Return False if none of the galaxies provided a match
return False
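# -----------------------------------------------------------------
# Editorial sketch (not part of the original module): the aperture radii
# above follow the standard Gaussian relation sigma = FWHM / (2*sqrt(2*ln 2)),
# which is what `statistics.fwhm_to_sigma` is assumed to encode.
def _demo_fwhm_to_radius(fwhm_pix=2.5, sigma_level=3.0):
    """Worked example of the FWHM -> sigma -> radius arithmetic used above."""
    import math
    fwhm_to_sigma = 1.0 / (2.0 * math.sqrt(2.0 * math.log(2.0)))  # ~0.42466
    sigma = fwhm_pix * fwhm_to_sigma  # sigma in pixels
    return sigma * sigma_level  # aperture radius in pixels (~3.19 for the defaults)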
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/sources/starfinder.py
|
Python
|
mit
| 38,170
|
[
"Galaxy",
"Gaussian"
] |
745c59e9be0e1d8b7fd1acdaf80428b2e629130e1d3864e15339387c9c5c6066
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import * # noqa
from past.builtins import basestring
import logging
import os
from gmusicapi.utils import utils
from future.utils import with_metaclass
from oauth2client.client import OAuth2WebServerFlow
import oauth2client.file
import webbrowser
class _Base(with_metaclass(utils.DocstringInheritMeta, object)):
"""Factors out common client setup."""
_session_class = utils.NotImplementedField
num_clients = 0 # used to disambiguate loggers
def __init__(self, logger_basename, debug_logging, validate, verify_ssl):
"""
:param debug_logging: each Client has a ``logger`` member.
The logger is named ``gmusicapi.<client class><client number>`` and
will propagate to the ``gmusicapi`` root logger.
If this param is ``True``, handlers will be configured to send
this client's debug log output to disk,
with warnings and above printed to stderr.
`Appdirs <https://pypi.python.org/pypi/appdirs>`__
``user_log_dir`` is used by default. Users can run::
from gmusicapi.utils import utils
print(utils.log_filepath)
to see the exact location on their system.
If ``False``, no handlers will be configured;
users must create their own handlers.
Completely ignoring logging is dangerous and not recommended.
The Google Music protocol can change at any time; if
something were to go wrong, the logs would be necessary for
recovery.
:param validate: if False, do not validate server responses against
known schemas. Validation helps to catch protocol changes, but
requires significant cpu work.
This arg is stored as ``self.validate`` and can be safely
modified at runtime.
:param verify_ssl: if False, exceptions will not be raised if there
are problems verifying SSL certificates.
Be wary of using this option; it's almost always better to
fix the machine's SSL configuration than to ignore errors.
"""
# this isn't correct if init is called more than once, so we log the
# client name below to avoid confusion for people reading logs
_Base.num_clients += 1
logger_name = "gmusicapi.%s%s" % (logger_basename,
_Base.num_clients)
self._cache = {}
self.logger = logging.getLogger(logger_name)
self.validate = validate
self._verify_ssl = verify_ssl
def setup_session(s):
s.verify = self._verify_ssl
self.session = self._session_class(rsession_setup=setup_session)
if debug_logging:
utils.configure_debug_log_handlers(self.logger)
self.logger.info("initialized")
self.logout()
def _make_call(self, protocol, *args, **kwargs):
"""Returns the response of a protocol.Call.
args/kwargs are passed to protocol.perform.
CallFailure may be raised."""
return protocol.perform(self.session, self.validate, *args, **kwargs)
def is_authenticated(self):
"""Returns ``True`` if the Api can make an authenticated request."""
return self.session.is_authenticated
def logout(self):
"""Forgets local authentication and cached properties in this Api instance.
Returns ``True`` on success."""
# note to clients: this will be called during __init__.
self.session.logout()
self._cache.clear() # Clear the instance of all cached properties.
self.logger.info("logged out")
return True
class _OAuthClient(_Base):
_path_sentinel = object()
# the default path for credential storage
OAUTH_FILEPATH = utils.NotImplementedField
@classmethod
def perform_oauth(cls, storage_filepath=_path_sentinel, open_browser=False):
"""Provides a series of prompts for a user to follow to authenticate.
Returns ``oauth2client.client.OAuth2Credentials`` when successful.
In most cases, this should only be run once per machine to store
credentials to disk, then never be needed again.
If the user refuses to give access,
``oauth2client.client.FlowExchangeError`` is raised.
:param storage_filepath: a filepath to write the credentials to,
or ``None``
to not write the credentials to disk (which is not recommended).
`Appdirs <https://pypi.python.org/pypi/appdirs>`__
``user_data_dir`` is used by default. Check the OAUTH_FILEPATH field
on this class to see the exact location that will be used.
:param open_browser: if True, attempt to open the auth url
in the system default web browser. The url will be printed
regardless of this param's setting.
This flow is intentionally very simple.
For complete control over the OAuth flow, pass an
``oauth2client.client.OAuth2Credentials``
to :func:`login` instead.
"""
if storage_filepath is cls._path_sentinel:
storage_filepath = cls.OAUTH_FILEPATH
flow = OAuth2WebServerFlow(**cls._session_class.oauth._asdict())
auth_uri = flow.step1_get_authorize_url()
print()
print("Visit the following url:\n %s" % auth_uri)
if open_browser:
print()
print('Opening your browser to it now...', end=' ')
webbrowser.open(auth_uri)
print('done.')
print("If you don't see your browser, you can just copy and paste the url.")
print()
code = input("Follow the prompts, then paste the auth code here and hit enter: ")
credentials = flow.step2_exchange(code)
if storage_filepath is not None:
if storage_filepath == cls.OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(cls.OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(storage_filepath)
storage.put(credentials)
return credentials
def _oauth_login(self, oauth_credentials):
"""Return True on success."""
if isinstance(oauth_credentials, basestring):
oauth_file = oauth_credentials
if oauth_file == self.OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(self.OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(oauth_file)
oauth_credentials = storage.get()
if oauth_credentials is None:
self.logger.warning("could not retrieve oauth credentials from '%r'", oauth_file)
return False
if not self.session.login(oauth_credentials):
self.logger.warning("failed to authenticate")
return False
self.logger.info("oauth successful")
return True
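# Editorial usage sketch (hypothetical; the concrete client classes live
# elsewhere in gmusicapi.clients, and `SomeOAuthClient` below stands in
# for a real subclass that defines _session_class and OAUTH_FILEPATH):
#
#     credentials = SomeOAuthClient.perform_oauth(open_browser=True)
#     client = SomeOAuthClient()
#     client._oauth_login(credentials)   # or the subclass's public login wrapper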
|
vially/googlemusic-xbmc
|
resources/Lib/gmusicapi/clients/shared.py
|
Python
|
gpl-3.0
| 7,024
|
[
"VisIt"
] |
9df2049a3992df8d86ddd81ee86241c8008e86371c5b599f7df51ea178e34162
|
"""Integration with Galaxy nglims.
"""
from __future__ import print_function
import collections
import copy
import glob
import operator
import os
import subprocess
import joblib
import six
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.galaxy.api import GalaxyApiAccess
from bcbio.illumina import flowcell
from bcbio.pipeline.run_info import clean_name
from bcbio.workflow import template
from functools import reduce
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config):
"""Prepare sample fastq files and provide global sample configuration for the flowcell.
Handles merging of fastq files split by lane and also by the bcl2fastq
preparation process.
"""
fastq_final_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged"))
cores = utils.get_in(config, ("algorithm", "num_cores"), 1)
ldetails = joblib.Parallel(cores)(joblib.delayed(_prep_sample_and_config)(x, fastq_dir, fastq_final_dir)
for x in _group_same_samples(ldetails))
config_file = _write_sample_config(run_folder, [x for x in ldetails if x])
return config_file, fastq_final_dir
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir):
"""Prepare output fastq file and configuration for a single sample.
Only passes non-empty files through for processing.
"""
files = []
print("->", ldetail_group[0]["name"], len(ldetail_group))
for read in ["R1", "R2"]:
fastq_inputs = sorted(list(set(reduce(operator.add,
(_get_fastq_files(x, read, fastq_dir) for x in ldetail_group)))))
if len(fastq_inputs) > 0:
files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0]))
if len(files) > 0:
if _non_empty(files[0]):
out = ldetail_group[0]
out["files"] = files
return out
def _non_empty(f):
with utils.open_gzipsafe(f) as in_handle:
for line in in_handle:
return True
return False
def _write_sample_config(run_folder, ldetails):
"""Generate a bcbio-nextgen YAML configuration file for processing a sample.
"""
out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder))
with open(out_file, "w") as out_handle:
fc_name, fc_date = flowcell.parse_dirname(run_folder)
out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails],
key=operator.itemgetter("name", "description")),
"fc_name": fc_name,
"fc_date": fc_date}
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
def _prepare_sample(data, run_folder):
"""Extract passed keywords from input LIMS information.
"""
want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"])
out = {}
for k, v in data.items():
if k in want:
out[k] = _relative_paths(v, run_folder)
if "algorithm" not in out:
analysis, algorithm = _select_default_algorithm(out.get("analysis"))
out["algorithm"] = algorithm
out["analysis"] = analysis
description = "%s-%s" % (out["name"], clean_name(out["description"]))
out["name"] = [out["name"], description]
out["description"] = description
return out
def _select_default_algorithm(analysis):
"""Provide default algorithm sections from templates or standard
"""
if not analysis or analysis == "Standard":
return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard",
"recalibrate": False, "realign": False, "mark_duplicates": True,
"variantcaller": False}
elif "variant" in analysis:
try:
config, _ = template.name_to_config(analysis)
except ValueError:
config, _ = template.name_to_config("freebayes-variant")
return "variant", config["details"][0]["algorithm"]
else:
return analysis, {}
def _relative_paths(xs, base_path):
"""Adjust paths to be relative to the provided base path.
"""
if isinstance(xs, six.string_types):
if xs.startswith(base_path):
return xs.replace(base_path + "/", "", 1)
else:
return xs
elif isinstance(xs, (list, tuple)):
return [_relative_paths(x, base_path) for x in xs]
elif isinstance(xs, dict):
out = {}
for k, v in xs.items():
out[k] = _relative_paths(v, base_path)
return out
else:
return xs
def _get_fastq_files(ldetail, read, fastq_dir):
"""Retrieve fastq files corresponding to the sample and read number.
"""
return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"],
"Sample_%s" % ldetail["name"],
"%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail):
"""Concatenate multiple input fastq files, preparing a bgzipped output file.
"""
out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read))
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
subprocess.check_call("zcat %s | bgzip -c > %s" % (" ".join(finputs), tx_out_file), shell=True)
return out_file
def _group_same_samples(ldetails):
"""Move samples into groups -- same groups have identical names.
"""
sample_groups = collections.defaultdict(list)
for ldetail in ldetails:
sample_groups[ldetail["name"]].append(ldetail)
return sorted(sample_groups.values(), key=lambda xs: xs[0]["name"])
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir):
"""Retrieve flattened run information for a processed directory from Galaxy nglims API.
"""
galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey)
fc_name, fc_date = flowcell.parse_dirname(run_folder)
galaxy_info = galaxy_api.run_details(fc_name, fc_date)
if "error" in galaxy_info:
return galaxy_info
if not galaxy_info["run_name"].startswith(fc_date) and not galaxy_info["run_name"].endswith(fc_name):
raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s" %
(galaxy_info["run_name"], fc_date, fc_name))
ldetails = _flatten_lane_details(galaxy_info)
out = []
for item in ldetails:
# Do uploads for all non-controls
if item["description"] != "control" or item["project_name"] != "control":
item["upload"] = {"method": "galaxy", "run_id": galaxy_info["run_id"],
"fc_name": fc_name, "fc_date": fc_date,
"dir": storedir,
"galaxy_url": galaxy_url, "galaxy_api_key": galaxy_apikey}
for k in ["lab_association", "private_libs", "researcher", "researcher_id", "sample_id",
"galaxy_library", "galaxy_role"]:
item["upload"][k] = item.pop(k, "")
out.append(item)
return out
def _flatten_lane_details(runinfo):
"""Provide flattened lane information with multiplexed barcodes separated.
"""
out = []
for ldetail in runinfo["details"]:
# handle controls
if "project_name" not in ldetail and ldetail["description"] == "control":
ldetail["project_name"] = "control"
for i, barcode in enumerate(ldetail.get("multiplex", [{}])):
cur = copy.deepcopy(ldetail)
cur["name"] = "%s-%s" % (ldetail["name"], i + 1)
cur["description"] = barcode.get("name", ldetail["description"])
cur["bc_index"] = barcode.get("sequence", "")
cur["project_name"] = clean_name(ldetail["project_name"])
out.append(cur)
return out
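# Editorial sketch (illustrative values, not part of the original module):
# _relative_paths trims the base path recursively through nested lists and
# dicts, e.g.
#
#     _relative_paths({"files": ["/runs/X/s1.fastq.gz"]}, "/runs/X")
#     # -> {"files": ["s1.fastq.gz"]}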
|
a113n/bcbio-nextgen
|
bcbio/galaxy/nglims.py
|
Python
|
mit
| 8,015
|
[
"BWA",
"Galaxy"
] |
69dbb2408a6cea557f9af98787fa4b8bd2ad3c9e9afaa4e246e190e2e98b3b7f
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to access resources at ExPASy over the WWW.
See http://www.expasy.ch/
Functions:
- get_prodoc_entry Interface to the get-prodoc-entry CGI script.
- get_prosite_entry Interface to the get-prosite-entry CGI script.
- get_prosite_raw Interface to the get-prosite-raw CGI script.
- get_sprot_raw Interface to the get-sprot-raw CGI script.
- sprot_search_ful Interface to the sprot-search-ful CGI script.
- sprot_search_de Interface to the sprot-search-de CGI script.
"""
# Import these functions with a leading underscore as they are not intended for reuse
from Bio._py3k import urlopen as _urlopen
from Bio._py3k import urlencode as _urlencode
def get_prodoc_entry(id, cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry'):
"""get_prodoc_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry') -> handle
Get a handle to a PRODOC entry at ExPASy in HTML format.
For a non-existing key XXX, ExPASy returns an HTML-formatted page
containing this line:
'There is no PROSITE documentation entry XXX. Please try again.'
"""
# Open a handle to ExPASy.
return _urlopen("%s?%s" % (cgi, id))
def get_prosite_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry'):
"""get_prosite_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry') -> handle
Get a handle to a PROSITE entry at ExPASy in HTML format.
For a non-existing key XXX, ExPASy returns an HTML-formatted page
containing this line:
'There is currently no PROSITE entry for XXX. Please try again.'
"""
return _urlopen("%s?%s" % (cgi, id))
def get_prosite_raw(id, cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl'):
"""get_prosite_raw(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl')
-> handle
Get a handle to a raw PROSITE or PRODOC entry at ExPASy.
For a non-existing key, ExPASy returns nothing.
"""
return _urlopen("%s?%s" % (cgi, id))
def get_sprot_raw(id):
"""Get a handle to a raw SwissProt entry at ExPASy.
For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt
(as per the http://www.expasy.ch/expasy_urls.html documentation).
"""
return _urlopen("http://www.uniprot.org/uniprot/%s.txt" % id)
def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful'):
"""sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful') -> handle
Search SwissProt by full text.
"""
variables = {'SEARCH': text}
if make_wild:
variables['makeWild'] = 'on'
if swissprot:
variables['S'] = 'on'
if trembl:
variables['T'] = 'on'
options = _urlencode(variables)
fullcgi = "%s?%s" % (cgi, options)
handle = _urlopen(fullcgi)
return handle
def sprot_search_de(text, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-de'):
"""sprot_search_de(text, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-de') -> handle
Search SwissProt by name, description, gene name, species, or
organelle.
"""
variables = {'SEARCH': text}
if swissprot:
variables['S'] = 'on'
if trembl:
variables['T'] = 'on'
options = _urlencode(variables)
fullcgi = "%s?%s" % (cgi, options)
handle = _urlopen(fullcgi)
return handle
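# Editorial usage sketch (requires network access; the accession below is
# illustrative): fetching a raw SwissProt record with the function above.
#
#     from Bio import ExPASy
#     handle = ExPASy.get_sprot_raw("P00750")
#     record_text = handle.read()
#     handle.close()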
|
zjuchenyuan/BioWeb
|
Lib/Bio/ExPASy/__init__.py
|
Python
|
mit
| 3,735
|
[
"Biopython"
] |
c1d873aea96ecfb1e3d9677868810d7e935db367771fe96b110129cd3756d94b
|
#
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
suite = {
"mxversion" : "5.60.0",
"name" : "fastr",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "truffle",
"subdir" : True,
"version" : "d1bb9076f1fa6af71c60be140f980794596a75b4",
"urls" : [
{"url" : "https://github.com/graalvm/graal", "kind" : "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
},
],
},
"repositories" : {
"snapshots" : {
"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots",
"licenses" : ["GPLv2"]
}
},
"licenses" : {
"GPLv2" : {
"name" : "GNU General Public License, version 2",
"url" : "http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html"
},
},
"defaultLicense" : "GPLv2",
# libraries that we depend on
# N.B. The first four with a "path" attribute must be located
# relative to the suite root and not the mx cache because they are
# explicitly referenced in the Parser annotation processor.
"libraries" : {
"GNUR" : {
"path" : "libdownloads/R-3.4.0.tar.gz",
"urls" : ["http://cran.rstudio.com/src/base/R-3/R-3.4.0.tar.gz"],
"sha1" : "054c1d099006354c89b195df6783b933846ced60",
"resource" : "true"
},
"GNU_ICONV" : {
"path" : "libdownloads/libiconv-1.14.tar.gz",
"urls" : ["http://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.14.tar.gz"],
"sha1" : "be7d67e50d72ff067b2c0291311bc283add36965",
"resource" : "true"
},
"ANTLR-3.5" : {
"path" : "libdownloads/antlr-runtime-3.5.jar",
"urls" : ["http://central.maven.org/maven2/org/antlr/antlr-runtime/3.5/antlr-runtime-3.5.jar"],
"sha1" : "0baa82bff19059401e90e1b90020beb9c96305d7",
},
"ANTLR-C-3.5" : {
"path" : "libdownloads/antlr-complete-3.5.1.jar",
"urls" : ["http://central.maven.org/maven2/org/antlr/antlr-complete/3.5.1/antlr-complete-3.5.1.jar"],
"sha1" : "ebb4b995fd67a9b291ea5b19379509160f56e154",
},
"XZ-1.5" : {
"path" : "libdownloads/xz-1.5.jar",
"urls" : ["http://central.maven.org/maven2/org/tukaani/xz/1.5/xz-1.5.jar"],
"sha1" : "9c64274b7dbb65288237216e3fae7877fd3f2bee",
},
},
"projects" : {
"com.oracle.truffle.r.parser.processor" : {
"sourceDirs" : ["src"],
"dependencies" : [
"ANTLR-3.5",
"ANTLR-C-3.5",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,FastR",
},
"com.oracle.truffle.r.parser" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.parser.processor",
"com.oracle.truffle.r.runtime",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : ["TRUFFLE_R_PARSER_PROCESSOR"],
"workingSets" : "Truffle,FastR",
},
"com.oracle.truffle.r.nodes" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.runtime",
"truffle:TRUFFLE_DEBUG",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.nodes.builtin" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.library",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.nodes.test" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.test",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,FastR,Test",
"jacoco" : "include",
},
"com.oracle.truffle.r.test" : {
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
"truffle:TRUFFLE_TCK",
"com.oracle.truffle.r.engine",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,FastR,Test",
"jacoco" : "include",
},
"com.oracle.truffle.r.test.native" : {
"native" : True,
"sourceDirs" : [],
"dependencies" : ["com.oracle.truffle.r.native"],
"platformDependent" : True,
"output" : "com.oracle.truffle.r.test.native",
"results" :[
"urand/lib/liburand.so",
],
"workingSets" : "FastR",
},
"com.oracle.truffle.r.test.packages" : {
"sourceDirs" : ["r"],
"javaCompliance" : "1.8",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.test.packages.analyzer" : {
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT"
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.engine" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.nodes.builtin",
"com.oracle.truffle.r.parser",
"truffle:JLINE",
"truffle:TRUFFLE_DEBUG",
"truffle:TRUFFLE_NFI",
],
"generatedDependencies" : [
"com.oracle.truffle.r.parser",
],
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.runtime" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.launcher",
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_DEBUG",
"XZ-1.5",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.launcher" : {
"sourceDirs" : ["src"],
"dependencies" : [
"sdk:GRAAL_SDK",
"truffle:JLINE",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : [
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.ffi.impl" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.ffi.processor",
"com.oracle.truffle.r.nodes"
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
"R_FFI_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.ffi.processor" : {
"sourceDirs" : ["src"],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.native" : {
"sourceDirs" : [],
# "class" : "FastRNativeProject",
"dependencies" : [
"GNUR",
"GNU_ICONV",
"truffle:TRUFFLE_NFI_NATIVE",
],
"native" : True,
"single_job" : True,
"workingSets" : "FastR",
"buildEnv" : {
"NFI_INCLUDES" : "-I<path:truffle:TRUFFLE_NFI_NATIVE>/include",
},
},
"com.oracle.truffle.r.library" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.ffi.impl",
],
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "1.8",
"workingSets" : "FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.release" : {
"sourceDirs" : ["src"],
"dependencies" : ["com.oracle.truffle.r.native.recommended"],
"class" : "FastRReleaseProject",
"output" : "com.oracle.truffle.r.release"
},
"com.oracle.truffle.r.native.recommended" : {
"dependencies" : [
"com.oracle.truffle.r.native",
"com.oracle.truffle.r.engine",
"com.oracle.truffle.r.ffi.impl"
],
"class" : "FastRNativeRecommendedProject",
"native" : True,
"workingSets" : "FastR",
},
},
"distributions" : {
"TRUFFLE_R_PARSER_PROCESSOR" : {
"description" : "internal support for generating the R parser",
"dependencies" : ["com.oracle.truffle.r.parser.processor"],
"exclude" : [
"ANTLR-3.5",
"ANTLR-C-3.5",
],
"maven" : "False",
},
"R_FFI_PROCESSOR" : {
"description" : "internal support for generating FFI classes",
"dependencies" : ["com.oracle.truffle.r.ffi.processor"],
"maven" : "False",
},
"FASTR" : {
"description" : "class files for compiling against FastR in a separate suite",
"dependencies" : ["com.oracle.truffle.r.engine", "com.oracle.truffle.r.launcher", "com.oracle.truffle.r.ffi.impl"],
"mainClass" : "com.oracle.truffle.r.launcher.RCommand",
"exclude" : [
"truffle:JLINE",
"ANTLR-3.5",
"GNUR",
"GNU_ICONV",
"XZ-1.5",
],
"distDependencies" : [
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_DEBUG",
"truffle:TRUFFLE_NFI",
"truffle:TRUFFLE_NFI_NATIVE",
],
},
"FASTR_UNIT_TESTS" : {
"description" : "unit tests",
"dependencies" : [
"com.oracle.truffle.r.test",
"com.oracle.truffle.r.nodes.test"
],
"exclude": ["mx:HAMCREST", "mx:JUNIT", "mx:JMH"],
"distDependencies" : [
"FASTR",
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_DEBUG",
"TRUFFLE_R_PARSER_PROCESSOR",
"truffle:TRUFFLE_TCK",
],
},
"FASTR_UNIT_TESTS_NATIVE" : {
"description" : "unit tests support (from test.native project)",
"native" : True,
"platformDependent" : True,
"dependencies" : [
"com.oracle.truffle.r.test.native",
],
},
"FASTR_RELEASE<rffi>": {
"description" : "a binary release of FastR",
"dependencies" : ["com.oracle.truffle.r.release"],
"os_arch" : {
"linux" : {
"amd64" : {
"path" : "mxbuild/dists/linux/amd64/<rffi>/fastr-release.jar",
},
"sparcv9" : {
"path" : "mxbuild/dists/linux/sparcv9/<rffi>/fastr-release.jar",
},
},
"darwin" : {
"amd64" : {
"path" : "mxbuild/dists/darwin/amd64/<rffi>/fastr-release.jar",
},
},
"solaris" : {
"amd64" : {
"path" : "mxbuild/dists/solaris/amd64/<rffi>/fastr-release.jar",
},
"sparcv9" : {
"path" : "mxbuild/dists/solaris/sparcv9/<rffi>/fastr-release.jar",
},
},
},
},
},
}
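# Editorial note (assumption): this suite definition is consumed by the
# `mx` build tool from the suite root; a typical session would be, e.g.:
#     mx build    # compile the projects declared above
#     mx r        # launch FastR via the FASTR distribution's main class
# (command names other than `mx build` are illustrative).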
|
akunft/fastr
|
mx.fastr/suite.py
|
Python
|
gpl-2.0
| 12,341
|
[
"VisIt"
] |
0fbca47a54daec9ca6c3aec8f923d06e008c9a52ea1d3f56e15d2917e63a368e
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2009 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Classes for sale details """
import datetime
import pango
import gtk
from kiwi.currency import currency
from kiwi.ui.objectlist import Column, SummaryLabel, ColoredColumn
from stoqlib.api import api
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.defaults import payment_value_colorize
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.dialogs.clientdetails import ClientDetailsDialog
from stoqlib.gui.search.searchcolumns import IdentifierColumn
from stoqlib.domain.sale import Sale
from stoqlib.domain.payment.views import PaymentChangeHistoryView
from stoqlib.domain.payment.renegotiation import PaymentRenegotiation
_ = stoqlib_gettext
class _RenegotiationItem(object):
def __init__(self, payment_group):
parent = payment_group.get_parent()
self.parent_id = parent.id
self.open_date = parent.open_date
if isinstance(parent, Sale):
desc = _("Sale %s") % (parent.identifier)
self.total_amount = parent.total_amount
elif isinstance(parent, PaymentRenegotiation):
desc = _("Renegotiation %s") % (parent.identifier)
self.total_amount = parent.total
self.description = desc
class RenegotiationDetailsDialog(BaseEditor):
gladefile = "RenegotiationDetailsDialog"
model_type = PaymentRenegotiation
title = _(u"Renegotiation Details")
size = (750, 460)
hide_footer = True
proxy_widgets = ('status_lbl',
'client_lbl',
'responsible_name',
'open_date_lbl',
'total_lbl',
'notes',
'identifier',
'subtotal_lbl',
'surcharge_lbl',
'discount_lbl')
def _setup_columns(self):
self.items_list.set_columns(self._get_items_columns())
self.payments_list.set_columns(self._get_payments_columns())
self.payments_info_list.set_columns(self._get_payments_info_columns())
def _setup_summary_labels(self):
summary_label = SummaryLabel(klist=self.payments_list,
column='paid_value',
label='<b>%s</b>' % api.escape(_(u"Total:")),
value_format='<b>%s</b>')
summary_label.show()
self.payments_vbox.pack_start(summary_label, False)
def _get_renegotiation_items(self):
for item in self.model.get_items():
yield _RenegotiationItem(item)
def _setup_widgets(self):
if not self.model.client:
self.details_button.set_sensitive(False)
self._setup_columns()
if self.model.status == PaymentRenegotiation.STATUS_RENEGOTIATED:
self.status_details_button.show()
else:
self.status_details_button.hide()
self.items_list.add_list(self._get_renegotiation_items())
self.payments_list.add_list(self.model.payments)
changes = PaymentChangeHistoryView.find_by_group(self.store,
self.model.group)
self.payments_info_list.add_list(changes)
self._setup_summary_labels()
def _get_payments_columns(self):
return [IdentifierColumn('identifier', title=_('Payment #')),
Column('method.description', _("Type"),
data_type=str, width=60),
Column('description', _("Description"), data_type=str,
width=150, expand=True),
Column('due_date', _("Due date"), sorted=True,
data_type=datetime.date, width=90,
justify=gtk.JUSTIFY_RIGHT),
Column('paid_date', _("Paid date"),
data_type=datetime.date, width=90),
Column('status_str', _("Status"), data_type=str, width=80),
ColoredColumn('base_value', _("Value"), data_type=currency,
width=90, color='red',
justify=gtk.JUSTIFY_RIGHT,
data_func=payment_value_colorize),
ColoredColumn('paid_value', _("Paid value"), data_type=currency,
width=92, color='red',
justify=gtk.JUSTIFY_RIGHT,
data_func=payment_value_colorize)]
def _get_items_columns(self):
return [Column('description', _("Description"), sorted=True,
data_type=unicode, expand=True),
Column('open_date', _("Open date"), data_type=datetime.date,
width=90),
Column('total_amount', _("Total"), data_type=currency, width=100)]
def _get_payments_info_columns(self):
return [Column('change_date', _(u"When"),
data_type=datetime.date, sorted=True),
Column('description', _(u"Payment"),
data_type=str, expand=True,
ellipsize=pango.ELLIPSIZE_END),
Column('changed_field', _(u"Changed"),
data_type=str, justify=gtk.JUSTIFY_RIGHT),
Column('from_value', _(u"From"),
data_type=str, justify=gtk.JUSTIFY_RIGHT),
Column('to_value', _(u"To"),
data_type=str, justify=gtk.JUSTIFY_RIGHT),
Column('reason', _(u"Reason"),
data_type=str, expand=True,
ellipsize=pango.ELLIPSIZE_END)]
#
# BaseEditor hooks
#
def setup_proxies(self):
self._setup_widgets()
self.add_proxy(self.model, RenegotiationDetailsDialog.proxy_widgets)
#
# Kiwi handlers
#
def on_details_button__clicked(self, button):
run_dialog(ClientDetailsDialog, self, self.store, self.model.client)
def on_status_details_button__clicked(self, button):
run_dialog(RenegotiationDetailsDialog, self, self.store,
self.model.group.renegotiation)
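# Editorial usage sketch (hypothetical wiring; `store` and `renegotiation`
# come from the surrounding Stoq application), mirroring the run_dialog
# calls in the handlers above:
#
#     from stoqlib.gui.base.dialogs import run_dialog
#     run_dialog(RenegotiationDetailsDialog, None, store, renegotiation)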
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/renegotiationdetails.py
|
Python
|
gpl-2.0
| 7,090
|
[
"VisIt"
] |
72b880574d117037fe4d818d5bd69b605573684d8518e41e5846f1e70de0d8df
|
import datetime
import unittest
import urllib.parse
from msschem.download import CAMSRegDownload, SilamDownload
import msschem_settings
class TestCAMSRegDownload(unittest.TestCase):
def test_construct_urls_single(self):
required_urls = ['http://download.regional.atmosphere.copernicus.eu/services/CAMS50?&token=MYTOKEN&grid=0.1&model=ENSEMBLE&package=FORECAST_CO_ALLLEVELS&time=0H24H&referencetime=2017-02-15T00:00:00Z&format=NETCDF&licence=yes']
required_fns = ['test_0H24H.nc']
dl = msschem_settings.register_datasources['CAMSReg_ENSEMBLE'].cfg['dldriver']
params = {'fcinit': datetime.datetime(2017, 2, 15, 0),
'fcstart': datetime.datetime(2017, 2, 15, 0),
'fcend': datetime.datetime(2017, 2, 16, 0),
'species': 'CO',}
actual = dl.construct_urls(params, 'test.nc')
for (act_url, act_fn), req_url, req_fn in zip(
actual, required_urls, required_fns):
act_host, act_query = urllib.parse.splitquery(act_url)
req_host, req_query = urllib.parse.splitquery(req_url)
act_params = urllib.parse.parse_qs(act_query)
req_params = urllib.parse.parse_qs(req_query)
act_params.pop('token'), req_params.pop('token')
self.assertEqual(act_fn, req_fn)
self.assertEqual(act_host, req_host)
self.assertEqual(act_params, req_params)
def test_construct_urls_multiple(self):
required_urls = ['http://download.regional.atmosphere.copernicus.eu/services/CAMS50?&token=MYTOKEN&grid=0.1&model=ENSEMBLE&package=FORECAST_CO_ALLLEVELS&time=0H24H&referencetime=2017-02-15T00:00:00Z&format=NETCDF&licence=yes',
'http://download.regional.atmosphere.copernicus.eu/services/CAMS50?&token=MYTOKEN&grid=0.1&model=ENSEMBLE&package=FORECAST_CO_ALLLEVELS&time=25H48H&referencetime=2017-02-15T00:00:00Z&format=NETCDF&licence=yes']
required_fns = ['test_0H24H.nc', 'test_25H48H.nc']
dl = msschem_settings.register_datasources['CAMSReg_ENSEMBLE'].cfg['dldriver']
params = {'fcinit': datetime.datetime(2017, 2, 15, 0),
'fcstart': datetime.datetime(2017, 2, 16, 0),
'fcend': datetime.datetime(2017, 2, 16, 1),
'species': 'CO',}
actual = dl.construct_urls(params, 'test.nc')
for (act_url, act_fn), req_url, req_fn in zip(
actual, required_urls, required_fns):
act_host, act_query = urllib.parse.splitquery(act_url)
req_host, req_query = urllib.parse.splitquery(req_url)
act_params = urllib.parse.parse_qs(act_query)
req_params = urllib.parse.parse_qs(req_query)
act_params.pop('token'), req_params.pop('token')
self.assertEqual(act_fn, req_fn)
self.assertEqual(act_host, req_host)
self.assertEqual(act_params, req_params)
class TestSilamDownload(unittest.TestCase):
def test_construct_urls(self):
required_urls = ['http://silam.fmi.fi/thredds/ncss/silam_europe_v5_5/runs/silam_europe_v5_5_RUN_2017-02-15T00:00:00Z?var=cnc_HCHO_gas&disableLLSubset=on&disableProjSubset=on&horizStride=1&time_start=2017-02-15T01%3A00%3A00Z&time_end=2017-02-20T00%3A00%3A00Z&timeStride=1&vertStride=1&addLatLon=true&accept=netcdf4']
required_fns = ['test.nc']
dl = SilamDownload()
params = {'fcinit': datetime.datetime(2017, 2, 15),
'fcstart': datetime.datetime(2017, 2, 15, 1),
'fcend': datetime.datetime(2017, 2, 20),
'species': 'cnc_HCHO_gas'}
actual = dl.construct_urls(params, 'test.nc')
for (act_url, act_fn), req_url, req_fn in zip(
actual, required_urls, required_fns):
act_host, act_query = urllib.parse.splitquery(act_url)
req_host, req_query = urllib.parse.splitquery(req_url)
act_params = urllib.parse.parse_qs(act_query)
req_params = urllib.parse.parse_qs(req_query)
self.assertEqual(act_fn, req_fn)
self.assertEqual(act_host, req_host)
self.assertEqual(act_params, req_params)
def test_camsreg_download():
dl = msschem_settings.register_datasources['CAMSReg_ENSEMBLE'].cfg['dldriver']
dl.get('CO', datetime.datetime(2017, 2, 15),
datetime.datetime(2017, 2, 16, 0),
datetime.datetime(2017, 2, 16, 1),
'~/tmp/mss/test_camsreg_dl.nc')
def test_silam_download():
dl = SilamDownload()
params = {'fcinit': datetime.datetime(2017, 2, 15),
'fcstart': datetime.datetime(2017, 2, 15, 1),
'fcend': datetime.datetime(2017, 2, 15, 1),
'species': 'cnc_HCHO_gas'}
url, fn = dl.construct_urls(params, '~/tmp/mss/test_silam_dl.nc')[0]
dl.download_file(url, fn)
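# Editorial note: comparing parse_qs() dictionaries rather than raw URL
# strings keeps the assertions above independent of query-parameter order:
#     from urllib.parse import parse_qs
#     parse_qs("a=1&b=2") == parse_qs("b=2&a=1")   # True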
if __name__ == '__main__':
unittest.main()
|
andreas-h/mss-chem
|
tests/test_download.py
|
Python
|
mit
| 4,941
|
[
"NetCDF"
] |
10d89a62eb12791aacd4a5ee2080f7948298cc022c0496aef684c3c4b00e52db
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import os
import unittest
import numpy as np
from pymatgen.io.lammps.data import LammpsData
from pymatgen.core.structure import Molecule
__author__ = 'Kiran Mathew'
__email__ = 'kmathew@lbl.gov'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class TestLammpsData(unittest.TestCase):
@classmethod
def setUpClass(cls):
polymer_chain = Molecule.from_file(os.path.join(test_dir,"polymer_chain.xyz"))
box_size = [[0.0, 20.0], [0.0, 20.0], [0.0, 20.0]]
cls.lammps_data = LammpsData.from_structure(polymer_chain, box_size)
def test_system_info(self):
atomic_masses = [[1, 1.00794], [2, 12.0107], [3, 15.9994]]
atoms_data = [[1, 1, 2, 0.0, 10.216511872506619, 11.338023345800135, 12.744427580409154],
[2, 1, 1, 0.0, 9.8598518725066189, 12.346833345800135, 12.744427580409154],
[3, 1, 1, 0.0, 9.844392872506619, 10.820737345800135, 11.842773580409153],
[4, 1, 1, 0.0, 9.844392872506619, 10.820737345800135, 13.646081580409154],
[5, 1, 3, 0.0, 11.724011872506619, 11.338023345800135, 12.744427580409154],
[6, 1, 2, 0.0, 12.161361872506619, 9.9959933458001355, 12.744427580409154],
[7, 1, 1, 0.0, 11.789241872506619, 9.4787033458001346, 11.842773580409153],
[8, 1, 1, 0.0, 11.789241872506619, 9.4787033458001346, 13.646081580409154],
[9, 1, 2, 0.0, 12.161361872506619, 9.9959933458001355, 11.236927580409153],
[10, 1, 1, 0.0, 11.057400872506619, 9.9837103458001355, 11.249412580409153],
[11, 1, 1, 0.0, 12.54163787250662, 8.959522345800135, 11.249412580409153],
[12, 1, 3, 0.0, 12.647630872506618, 10.700686345800134, 9.9961595804091541],
[13, 1, 2, 0.0, 12.161361872506619, 9.9959933458001355, 8.8739875804091533],
[14, 1, 1, 0.0, 11.057398872506619, 9.9837063458001349, 8.8864715804091539],
[15, 1, 1, 0.0, 12.541635872506619, 8.9595193458001354, 8.8864715804091539],
[16, 1, 2, 0.0, 12.161361872506619, 8.4884933458001353, 8.8739875804091533],
[17, 1, 1, 0.0, 11.524257872506618, 8.5009783458001351, 7.9723335804091535],
[18, 1, 1, 0.0, 11.524257872506618, 8.5009783458001351, 9.7756415804091539],
[19, 1, 3, 0.0, 13.017544872506619, 7.2477253458001352, 8.8739875804091533],
[20, 1, 2, 0.0, 12.161361872506619, 6.1255533458001352, 8.8739875804091533],
[21, 1, 1, 0.0, 11.524253872506618, 6.1380373458001349, 7.9723335804091535],
[22, 1, 1, 0.0, 11.524253872506618, 6.1380373458001349, 9.7756415804091539],
[23, 1, 2, 0.0, 10.653861872506619, 6.1255533458001352, 8.8739875804091533],
[24, 1, 1, 0.0, 10.666346872506619, 6.7626573458001351, 7.9723335804091535],
[25, 1, 1, 0.0, 10.666346872506619, 6.7626573458001351, 9.7756415804091539],
[26, 1, 3, 0.0, 9.413093872506618, 5.2693693458001345, 8.8739875804091533],
[27, 1, 2, 0.0, 8.2909218725066189, 6.1255533458001352, 8.8739875804091533],
[28, 1, 1, 0.0, 8.3034058725066195, 6.7626613458001348, 7.9723335804091535],
[29, 1, 1, 0.0, 8.3034058725066195, 6.7626613458001348, 9.7756415804091539],
[30, 1, 2, 0.0, 8.2909218725066189, 7.6330533458001355, 8.8739875804091533],
[31, 1, 1, 0.0, 8.9280258725066179, 7.6205673458001346, 7.9723335804091535],
[32, 1, 1, 0.0, 8.9280258725066179, 7.6205673458001346, 9.7756415804091539],
[33, 1, 3, 0.0, 7.4347378725066182, 8.8738213458001347, 8.8739875804091533],
[34, 1, 2, 0.0, 8.2909218725066189, 9.9959933458001355, 8.8739875804091533],
[35, 1, 1, 0.0, 8.9280298725066185, 9.9835093458001349, 7.9723335804091535],
[36, 1, 1, 0.0, 8.9280298725066185, 9.9835093458001349, 9.7756415804091539],
[37, 1, 2, 0.0, 8.2909218725066189, 11.503493345800134, 8.8739875804091533],
[38, 1, 1, 0.0, 8.9280258725066179, 11.491008345800134, 7.9723335804091535],
[39, 1, 1, 0.0, 8.9280258725066179, 11.491008345800134, 9.7756415804091539],
[40, 1, 3, 0.0, 7.4347378725066182, 12.744261345800135, 8.8739875804091533],
[41, 1, 2, 0.0, 8.2909218725066189, 13.866433345800136, 8.8739875804091533],
[42, 1, 1, 0.0, 8.9280298725066185, 13.853949345800135, 7.9723335804091535],
[43, 1, 1, 0.0, 8.9280298725066185, 13.853949345800135, 9.7756415804091539],
[44, 1, 2, 0.0, 8.2909218725066189, 13.866433345800136, 10.381487580409154],
[45, 1, 1, 0.0, 8.6711978725066192, 12.829962345800135, 10.369001580409153],
[46, 1, 1, 0.0, 7.186960872506619, 13.854150345800136, 10.369001580409153],
[47, 1, 3, 0.0, 8.777190872506619, 14.571127345800136, 11.622255580409155],
[48, 1, 2, 0.0, 8.2909218725066189, 13.866433345800136, 12.744427580409154],
[49, 1, 1, 0.0, 8.6711958725066189, 12.829959345800134, 12.731943580409153],
[50, 1, 1, 0.0, 7.1869578725066185, 13.854147345800135, 12.731943580409153],
[51, 1, 1, 0.0, 8.7837588725066187, 14.580646345800135, 13.427099580409154]]
natom_types = 3
natoms = 51
np.testing.assert_almost_equal(self.lammps_data.atomic_masses,
atomic_masses, decimal=10)
np.testing.assert_almost_equal(self.lammps_data.atoms_data, atoms_data,
decimal=6)
self.assertEqual(self.lammps_data.natom_types, natom_types)
self.assertEqual(self.lammps_data.natoms, natoms)
def test_from_file(self):
self.lammps_data.write_data_file(
os.path.join(test_dir, "lammps_data.dat"))
lammps_data = LammpsData.from_file(
os.path.join(test_dir, "lammps_data.dat"))
self.assertEqual(str(lammps_data), str(self.lammps_data))
def tearDown(self):
for x in ["lammps_data.dat"]:
if os.path.exists(os.path.join(test_dir, x)):
os.remove(os.path.join(test_dir, x))
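# Editorial usage sketch (file names are hypothetical), mirroring the
# round trip exercised in setUpClass and test_from_file above:
#
#     mol = Molecule.from_file("polymer_chain.xyz")
#     data = LammpsData.from_structure(mol, [[0.0, 20.0]] * 3)
#     data.write_data_file("polymer.dat")
#     same = LammpsData.from_file("polymer.dat")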
if __name__ == "__main__":
unittest.main()
|
xhqu1981/pymatgen
|
pymatgen/io/lammps/tests/test_lammps_data.py
|
Python
|
mit
| 6,665
|
[
"LAMMPS",
"pymatgen"
] |
41c3bcea1a98efb89242e2c377cc9a6de4fc61ad4cd2ff8565abcfbfeefd3bb8
|
import os
import logging
from functools import partial
from capture_gui.vendor.Qt import QtCore, QtWidgets
from capture_gui import plugin, lib
from capture_gui import tokens
log = logging.getLogger("IO")
class IoAction(QtWidgets.QAction):
def __init__(self, parent, filepath):
super(IoAction, self).__init__(parent)
action_label = os.path.basename(filepath)
self.setText(action_label)
self.setData(filepath)
# check if file exists and disable when false
self.setEnabled(os.path.isfile(filepath))
# get icon from file
info = QtCore.QFileInfo(filepath)
icon_provider = QtWidgets.QFileIconProvider()
self.setIcon(icon_provider.icon(info))
self.triggered.connect(self.open_object_data)
def open_object_data(self):
lib.open_file(self.data())
class IoPlugin(plugin.Plugin):
"""Codec widget.
Allows to set format, compression and quality.
"""
id = "IO"
label = "Save"
section = "app"
order = 40
max_recent_playblasts = 5
def __init__(self, parent=None):
super(IoPlugin, self).__init__(parent=parent)
self.recent_playblasts = list()
self._layout = QtWidgets.QVBoxLayout()
self._layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self._layout)
# region Checkboxes
self.save_file = QtWidgets.QCheckBox(text="Save")
self.open_viewer = QtWidgets.QCheckBox(text="View when finished")
self.raw_frame_numbers = QtWidgets.QCheckBox(text="Raw frame numbers")
checkbox_hlayout = QtWidgets.QHBoxLayout()
checkbox_hlayout.setContentsMargins(5, 0, 5, 0)
checkbox_hlayout.addWidget(self.save_file)
checkbox_hlayout.addWidget(self.open_viewer)
checkbox_hlayout.addWidget(self.raw_frame_numbers)
checkbox_hlayout.addStretch(True)
# endregion Checkboxes
# region Path
self.path_widget = QtWidgets.QWidget()
self.browse = QtWidgets.QPushButton("Browse")
self.file_path = QtWidgets.QLineEdit()
self.file_path.setPlaceholderText("(not set; using scene name)")
tip = "Right click in the text field to insert tokens"
self.file_path.setToolTip(tip)
self.file_path.setStatusTip(tip)
self.file_path.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.file_path.customContextMenuRequested.connect(self.show_token_menu)
path_hlayout = QtWidgets.QHBoxLayout()
path_hlayout.setContentsMargins(0, 0, 0, 0)
path_label = QtWidgets.QLabel("Path:")
path_label.setFixedWidth(30)
path_hlayout.addWidget(path_label)
path_hlayout.addWidget(self.file_path)
path_hlayout.addWidget(self.browse)
self.path_widget.setLayout(path_hlayout)
# endregion Path
# region Recent Playblast
self.play_recent = QtWidgets.QPushButton("Play recent playblast")
self.recent_menu = QtWidgets.QMenu()
self.play_recent.setMenu(self.recent_menu)
# endregion Recent Playblast
self._layout.addLayout(checkbox_hlayout)
self._layout.addWidget(self.path_widget)
self._layout.addWidget(self.play_recent)
# Signals / connections
self.browse.clicked.connect(self.show_browse_dialog)
self.file_path.textChanged.connect(self.options_changed)
self.save_file.stateChanged.connect(self.options_changed)
self.raw_frame_numbers.stateChanged.connect(self.options_changed)
self.save_file.stateChanged.connect(self.on_save_changed)
# Ensure state is up-to-date with current settings
self.on_save_changed()
def on_save_changed(self):
"""Update the visibility of the path field"""
state = self.save_file.isChecked()
if state:
self.path_widget.show()
else:
self.path_widget.hide()
def show_browse_dialog(self):
"""Set the filepath using a browser dialog.
:return: None
"""
path = lib.browse()
if not path:
return
# Maya's browser returns Linux-based file paths; to ensure Windows is
# supported we use normpath
path = os.path.normpath(path)
self.file_path.setText(path)
def add_playblast(self, item):
"""
Add an item to the previous playblast menu
:param item: full path to a playblast file
:type item: str
:return: None
"""
# If item already in the recent playblasts remove it so we are
# sure to add it as the new first most-recent
try:
self.recent_playblasts.remove(item)
except ValueError:
pass
# Add as first in the recent playblasts
self.recent_playblasts.insert(0, item)
# Ensure the playblast list is never longer than maximum amount
# by removing the older entries that are at the end of the list
if len(self.recent_playblasts) > self.max_recent_playblasts:
del self.recent_playblasts[self.max_recent_playblasts:]
# Rebuild the actions menu
self.recent_menu.clear()
for playblast in self.recent_playblasts:
action = IoAction(parent=self, filepath=playblast)
self.recent_menu.addAction(action)
def on_playblast_finished(self, options):
"""Take action after the play blast is done"""
playblast_file = options['filename']
if not playblast_file:
return
self.add_playblast(playblast_file)
def get_outputs(self):
"""
Get the output of the widget based on the user's inputs
:return: collection of needed output values
:rtype: dict
"""
output = {"filename": None,
"raw_frame_numbers": self.raw_frame_numbers.isChecked(),
"viewer": self.open_viewer.isChecked()}
save = self.save_file.isChecked()
if not save:
return output
# get path, if nothing is set fall back to default
# project/images/playblast
path = self.file_path.text()
if not path:
path = lib.default_output()
output["filename"] = path
return output
def get_inputs(self, as_preset):
inputs = {"name": self.file_path.text(),
"save_file": self.save_file.isChecked(),
"open_finished": self.open_viewer.isChecked(),
"recent_playblasts": self.recent_playblasts,
"raw_frame_numbers": self.raw_frame_numbers.isChecked()}
if as_preset:
inputs["recent_playblasts"] = []
return inputs
def apply_inputs(self, settings):
directory = settings.get("name", None)
save_file = settings.get("save_file", True)
open_finished = settings.get("open_finished", True)
raw_frame_numbers = settings.get("raw_frame_numbers", False)
previous_playblasts = settings.get("recent_playblasts", [])
self.save_file.setChecked(save_file)
self.open_viewer.setChecked(open_finished)
self.raw_frame_numbers.setChecked(raw_frame_numbers)
for playblast in reversed(previous_playblasts):
self.add_playblast(playblast)
self.file_path.setText(directory)
def token_menu(self):
"""
Build the token menu based on the registered tokens
:returns: Menu
:rtype: QtWidgets.QMenu
"""
menu = QtWidgets.QMenu(self)
registered_tokens = tokens.list_tokens()
for token, value in registered_tokens.items():
label = "{} \t{}".format(token, value['label'])
action = QtWidgets.QAction(label, menu)
fn = partial(self.file_path.insert, token)
action.triggered.connect(fn)
menu.addAction(action)
return menu
def show_token_menu(self, pos):
"""Show custom manu on position of widget"""
menu = self.token_menu()
globalpos = QtCore.QPoint(self.file_path.mapToGlobal(pos))
menu.exec_(globalpos)
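# --- Added illustration (not part of the original plugin) --------------------
# A framework-free sketch of the recent-playblast bookkeeping implemented in
# IoPlugin.add_playblast above: move-to-front with de-duplication, truncated
# to a maximum length. The helper name is hypothetical.
def _update_recent(recent, item, max_items=5):
    """Return a copy of `recent` with `item` moved to the front, capped."""
    recent = [entry for entry in recent if entry != item]
    recent.insert(0, item)
    return recent[:max_items]
# e.g. _update_recent(["b.mov", "a.mov"], "a.mov") -> ["a.mov", "b.mov"]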
| Colorbleed/maya-capture-gui | capture_gui/plugins/ioplugin.py | Python | mit | 8,212 | ["BLAST"] | 82ee979e29ec5b978994da1f828936c4077da9f555154e603f5a626c1754b409 |
# Taken from:
# https://github.com/lennax/biopython/blob/f_loc5/Bio/SeqUtils/Mapper/__init__.py
# Copyright 2012 Lenna X. Peterson <arklenna@gmail.com>
# CoordinateMapper.py originally written by Reece Hart
# Older revisions may be found in this gist:
# https://gist.github.com/3172753
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Coordinate mapper for transformation of positions between genomic,
CDS, and protein coordinates.
Includes methods for converting locations to and from HGVS conventions.
GenBank locations can be parsed with SeqIO.
"""
from .MapPositions import MapPosition, \
GenomePosition, CDSPosition, ProteinPosition
from .CoordinateMapper import CoordinateMapper
| kantale/MutationInfo | biopython_mapper/__init__.py | Python | mit | 824 | ["Biopython"] | e3786031d74b740c47fe99414f6675e31141d21111728f71dea66c6710eeb6b3 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='FitELP',
version='0.1.0',
      description='A Python package to perform spectral emission-line fits with multiple Gaussian components in echelle or long-slit data.',
url='https://github.com/daniel-muthukrishna/FitELP',
author='Daniel Muthukrishna',
author_email='daniel.muthukrishna@gmail.com',
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='spectra emission spectral line fitting gaussian continuum echelle long-slit',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'matplotlib', 'uncertainties', 'lmfit==0.9.10', 'astropy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.rst', '*.md'],
},
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=run_analysis:main',
],
},
)
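# Added usage note (an assumption, not from the original file): from a source
# checkout the package can be installed in development mode with
#   pip install -e .
# and, once released on PyPI, with `pip install FitELP`.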
| daniel-muthukrishna/GiantStarFormingRegions | setup.py | Python | mit | 3,465 | ["Gaussian"] | d38cbf94e32022e442166d4bd94242b922322da652f6ff049127285a1cc7df48 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for craftgen."""
import abc
import collections
import itertools
import json
from typing import Dict, Sequence
from crafty import data
import scipy.stats
import tensorflow as tf
class MagnitudeNormalizer(metaclass=abc.ABCMeta):
"""Base class for magnitude normalizer."""
def __init__(self, scale: float):
"""Initializer.
Args:
scale: scale parameter for a given distribution (e.g. std for Gaussian).
"""
assert scale > 0.0
self.scale = scale
@abc.abstractmethod
def __call__(self, magnitude: float) -> float:
"""Calculates probability of a given magnitude for a given distribution."""
return
@abc.abstractmethod
def cdf(self, magnitude: float) -> float:
"""Calculates the cumulative probability for a given magnitude."""
return
@classmethod
def create(cls, magnitude_normalization: str, loc: float, scale: float):
if magnitude_normalization == 'normal':
return GaussianMagnitudeNormalizer(loc, scale)
elif magnitude_normalization.startswith('gamma'):
return GammaMagnitudeNormalizer(loc, scale)
else:
      raise ValueError('Available types are `gamma` and `normal`'
                       ', but got %s.' % magnitude_normalization)
class GaussianMagnitudeNormalizer(MagnitudeNormalizer):
"""Normalizer based on Normal Distribution."""
def __init__(self, mean: float, std: float):
"""Initializer.
Args:
mean: mean param for Gaussian.
std: std param for Gaussian.
"""
super().__init__(scale=std)
self.normal_dist = scipy.stats.norm(mean, self.scale)
def __call__(self, magnitude: float) -> float:
"""Calculates probability of a given magnitude for a normal distribution."""
# Get cumulative probability from -infinity to magnitude.
assert magnitude >= 0.0
return 2 * (1 - self.cdf(magnitude))
def cdf(self, magnitude: float) -> float:
"""Calculates the cumulative probability for a given magnitude."""
assert magnitude >= 0.0
return self.normal_dist.cdf(magnitude)
class GammaMagnitudeNormalizer(MagnitudeNormalizer):
"""Normalizer based on Gamma Distribution."""
def __init__(self, gamma_shape: float, scale: float):
"""Initializer.
Args:
gamma_shape: Gamma shape param for Gamma.
scale: scale param for Gamma.
"""
super().__init__(scale=scale)
self.gamma_dist = scipy.stats.gamma(gamma_shape)
def __call__(self, magnitude: float) -> float:
"""Calculates probability of a given magnitude for a gamma distribution."""
assert magnitude >= 0.0
return self.gamma_dist.pdf(magnitude / self.scale)
def cdf(self, magnitude: float) -> float:
"""Calculates the cumulative probability for a given magnitude."""
assert magnitude >= 0.0
return self.gamma_dist.cdf(magnitude / self.scale)
def pairwise(iterable):
"""From itertools: iterates pairwise over an iterable.
Args:
iterable: An iterable containing a sequence of items.
Returns:
An iterable containing pairs from the input iterable, e.g.
s0,s1,s2,s3... -> (s0,s1), (s1,s2), (s2, s3), ...
"""
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
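# e.g. list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
# (illustrative note added here; not part of the original module)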
def get_scan_to_path_and_heading_dict(
source_json_path: str) -> Dict[str, Sequence[data.Path]]:
"""Gets `scan` -> [data.Path(path, heading), ...] mapping.
  Helper function to collect transition stats house-wise.
Args:
source_json_path: Path to training R2R json.
Returns:
Mapping of scan -> [data.Path(path, heading), ...].
"""
with tf.io.gfile.GFile(source_json_path, 'r') as fp:
dataset = json.load(fp)
scan_to_path_and_heading = collections.defaultdict(list)
for item in dataset:
path_container = data.Path(path=item['path'], heading=item['heading'])
scan_to_path_and_heading[item['scan']].append(path_container)
return scan_to_path_and_heading
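# --- Added usage sketch (not part of the original module) ---------------------
# Assuming this module's own imports (crafty, tensorflow) resolve, the
# normalizer factory can be exercised as below; the numbers shown are for a
# standard normal distribution.
if __name__ == '__main__':
  gauss = MagnitudeNormalizer.create('normal', loc=0.0, scale=1.0)
  print(gauss.cdf(1.0))  # ~0.8413, i.e. P(X <= 1)
  print(gauss(1.0))      # ~0.3173, the two-sided tail 2 * (1 - cdf(1.0))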
| google-research/crafty | util.py | Python | apache-2.0 | 4,481 | ["Gaussian"] | 7ba12ab84393be58bd8471b3a96872cfaa4cb65598f7fa5d5ab929a1b2b3e7ed |
#!/usr/bin/python
###############################################################################
#
# This script prepares multiple files of coordination output from MDAnalysis
# coordination output scripts in order to facilitate numerous 1D distributions,
# graph building, and state splitting. This script performs a variety of
# simple processing functions including adding columns, padding the data
# with 0's and sorting the columns by a particular column (namely z value).
#
# Example: For 13/26/39/...-column data of this type (described elsewhere):
# 1.0 -0.13 -0.193 0.522 0.0 0.0 0.0 0.0 0.0 2.0 9.0 2.0 1748.0
# 2.0 -0.124 -0.013 0.662 0.0 0.0 1.0 0.0 0.0 2.0 8.0 2.0 1748.0
#
# The following command will load two datafiles, normalize
# the column length to 39 and remove the first 2000 lines:
#
# python Ion_Preprocessor.py -f f1.out f2.out -m 3 -c 13 -remove 2000
#
#
# This script is typically the second step in a larger analysis pipeline.
# As far as script history, it's a combination of Row_Add_Time.py,
# Row_Pad_Columns.py, Row_Sorted_OnZKeepAllTags.py, ion_count_splitter.py.
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from itertools import product
from re import match
from itertools import permutations
import gzip
# A helper function to iterate over fixed-size chunks of a list
def chunker(seq, size):
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
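# e.g. list(chunker([1, 2, 3, 4, 5, 6], 3)) -> [[1, 2, 3], [4, 5, 6]]
# (illustrative note added here)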
# A helper function to open with gzip if the file is gzipped
def file_opener(fname):
if fname.endswith('.gz'):
return gzip.open(fname)
else:
return open(fname)
# This script takes a list of lists containing floats
# and adds time. Some datasets do not require this step.
def add_time_column(data_floats, num_cols=13, verbose=False):
data_output = []
for line in data_floats:
non_time_columns = line[1:]
# We chunk the non-time columns into ion groupings that
# were detected at that timestep (-1 because there is no time col)
temp_line = []
for ion in chunker(non_time_columns,num_cols-1):
temp_line.append(line[0])
temp_line.extend(ion)
if verbose:
print line[0], " ".join([str(ion_col) for ion_col in ion]),
# Here we're building a series of temp lines and appending
# one for each timestep.
data_output.append(temp_line)
if verbose:
print "\n",
return data_output
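# Worked example (added note, num_cols=3): an input line [10.0, a1, a2, b1, b2]
# becomes [10.0, a1, a2, 10.0, b1, b2] -- the timestep is re-inserted in front
# of every (num_cols-1)-wide ion grouping.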
# This will produce a label for each ion at a given timestep for each binding
# mode. If the ion is not in a particular binding mode, it will be given
# a mode of "0". In a way, it's like a less specific version of regex_columns.
def bindingmode_columns(data_floats, sf_col=[5,6], num_cols=13, pad_col=4):
data_output = []
# Compute all the binding modes and turn them into strings
all_binding_modes = product("01",repeat=len(sf_col))
all_binding_strs = ["".join(mode) for mode in all_binding_modes]
for line in data_floats:
temp_line = []
for ion in chunker(line,num_cols):
            # In the case that the ion grouping has a hyphen
# we know that's a padded column and must be excluded.
if ion[pad_col] != "-":
# Extract the coordination using the columns passed
coords = [ion[col] for col in sf_col]
# The same as above but in string form with booleans
coords_str = "".join([str(int(coord>0)) for coord in coords])
temp_line.append(all_binding_strs.index(coords_str))
else:
temp_line.append("-")
data_output.append(temp_line)
return data_output
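# Worked example (added note, sf_col of length 2): the binding-mode alphabet
# is ["00", "01", "10", "11"]; an ion with coordination counts (0, 3) in the
# two sf_col columns maps to "01", i.e. binding-mode index 1.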
# This is a script that produces a regex classification from the input data.
# It requires sorted or sorted/padded data_floats that is returned from
# sort_columns and pad_columns functions.
def regex_columns(data_floats, regex_strings, num_cols=13,
pad_col=4, sf_col=[5,6], sort_col=3, sort_cut=0.0,
max_ions=3, max_coord=9, prefix=None):
data_output = []
# File streams for splitting purposes if the prefix flag is set
if prefix != None:
count_files={}
for regex_id in range(len(regex_strings)+1):
count_files[regex_id] = open(prefix+"_split"+str(regex_id),"w")
for line in data_floats:
temp_label = []
num_ions = 0
for ion in chunker(line,num_cols):
# This functionality was implemented upon discovering that
# zero coordinated ions can exist in either the central cavity
# or the extracellular region of a channel. For this case,
# a sort_value cutoff must be used to determine where the ion
# actually is! Note, look at your SF bound ion distributions
# to see if sort_cut is set correctly. 0.0 is a safe bet...
sort_val = ion[sort_col]
all_zeros = all([ion[col]==0.0 for col in sf_col])
# I need to code a bit of logic for pre-processed data.
if ion[pad_col] != "-":
if (sort_val > sort_cut) and all_zeros:
temp_label.extend("+"*len(sf_col))
else:
temp_label.extend([int(ion[col]) for col in sf_col])
else:
temp_label.extend("-"*len(sf_col))
num_ions += 1
#print temp_label
# Here's a fix for when coordination integer counts are too large
# and it ruins the fixed number of digit state label paradigm that
# is critical for regex matching.
for digit_index, digit in enumerate(temp_label):
if digit != "-" and digit != "+" and digit > max_coord:
temp_label[digit_index] = max_coord
for filler in range(max_ions-num_ions):
temp_label.extend("-"*len(sf_col))
# Convert the label list to a string of length max_ions*len(sf_cols)
temp_string = "".join([str(coord) for coord in temp_label])
assert len(temp_string) == max_ions*len(sf_col)
temp_bool = []
for regex_id, regex in enumerate(regex_strings):
if match(regex, temp_string) is not None:
temp_bool.append(True)
else:
temp_bool.append(False)
# Confirm there aren't multiple regex matches
assert sum(temp_bool) < 2, \
"Multiple regex matches for label " +str(temp_string)
# Here's the catch-all clause for when no regex matches
if sum(temp_bool) == 0.0:
temp_bool.append(True)
else:
temp_bool.append(False)
# Write to filestreams if prefix is set.
if prefix != None:
if sum(temp_bool) == 1:
count_files[temp_bool.index(True)].write(
" ".join([str(col) for col in line]))
count_files[temp_bool.index(True)].write("\n")
else:
count_files[len(regex_strings)].write(
" ".join([str(col) for col in line]))
count_files[len(regex_strings)].write("\n")
data_output.append((temp_string, temp_bool.index(True)))
# Close filestreams.
if prefix != None:
for key in count_files.keys():
count_files[key].close()
return data_output
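# Worked example (added note, max_ions=3 and two sf_col columns): a state
# label is 6 characters, e.g. "0112--" for two bound ions with coordinations
# "01" and "12" plus one absent ion slot; a regex such as "..12.." would then
# match any state whose second ion shows that coordination pattern.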
# This is a script that produces a state stream from the input data
# instead of recomputing it multiple times throughout the script for things
# like transition counting. It requires sorted or sorted/padded data
# that are returned from sort_columns and pad_columns functions.
def label_columns(data_floats, num_cols=13, pad_col=4, sf_col=[5,6],
sort_col=3, sort_cut=0.0, max_ions=3, verbose=False):
data_output = []
for line in data_floats:
temp_label = []
num_ions = 0
for ion in chunker(line,num_cols):
# This functionality was implemented upon discovering that
# zero coordinated ions can exist in either the central cavity
# or the extracellular region of a channel. For this case,
# a sort_value cutoff must be used to determine where the ion
# actually is! Note, look at your SF bound ion distributions
# to see if sort_cut is set correctly.
sort_val = ion[sort_col]
all_zeros = all([ion[col]==0.0 for col in sf_col])
# I need to code a bit of logic for pre-processed data.
if ion[pad_col] != "-":
if (sort_val > sort_cut) and all_zeros:
temp_label.extend("+"*len(sf_col))
else:
temp_label.extend([int(ion[col]) for col in sf_col])
else:
temp_label.extend("-"*len(sf_col))
num_ions += 1
for filler in range(max_ions-num_ions):
temp_label.extend("-"*len(sf_col))
# Convert the label list to a string of length max_ions*len(sf_cols)
temp_string = "".join([str(coord) for coord in temp_label])
data_output.append(temp_string)
return data_output
# This script creates a uniform number of ions in the channel by padding
# timesteps where fewer than max_ions ions are detected in the data_floats array.
def pad_columns(data_floats, num_cols=13, max_ions=3,
time_col=0, traj_col=11, verbose=False):
data_output = []
for line in data_floats:
# This is the fake line we're going to pad with.
# We're going to assume that line[0] contains time
# and that we have 7 additional columns other than
# an arbitrary number of coordination count columns.
fake_ion = [int(float(line[time_col])),0.,0.,0.] + \
["-" for x in range(num_cols-7)] + \
[0,int(float(line[traj_col])),0]
chunked_line = chunker(line,num_cols)
num_ions = len(line)/num_cols
temp_line = []
filler_line = []
# Note: if you have a timestep where num_ions exceeds your
# max_ions variable, you are only going to select the
# first "max_ions" ions and you may omit some!
# For this reason it is best to sort your ions first,
# so the deepest ion is preserved.
# Make sure to run ion_counter() to see how much data loss
# you are incurring by choosing a smaller max_ions value.
for ion in list(chunked_line)[:max_ions]:
temp_line.extend(ion)
if verbose:
print " ".join([str(ion_col) for ion_col in ion]),
for filler in range(max_ions-num_ions):
filler_line.extend(fake_ion)
if verbose:
print " ".join([str(ion_col) for ion_col in fake_ion]),
data_output.append(temp_line+filler_line)
if verbose:
print "\n",
return data_output
# This script sorts each ion grouping on a particular column value, useful
# for ranking ions by z-value. Toggle sorting from highest to lowest with the
# plus2minus argument, but note that this is still experimental with respect
# to downstream scripts, namely their use of traj_col and of padded columns.
def sort_columns(data_floats, num_cols=13, sort_col=3,
verbose=False, plus2minus=True):
data_output = []
for line in data_floats:
chunked_line = chunker(line,num_cols)
temp_line = []
temp_filler = []
# Here we sort each ion grouping by the sort_col argument
# with attention to reverse the list depending on plus2minus.
for ion in sorted(chunked_line, key=lambda col: col[sort_col],
reverse=plus2minus):
if ion[4] == "-":
temp_filler.extend(ion)
else:
temp_line.extend(ion)
if verbose:
if plus2minus:
print " ".join([str(ion_col) for ion_col in temp_line]),
print " ".join([str(ion_col) for ion_col in temp_filler]),
else:
print " ".join([str(ion_col) for ion_col in temp_filler]),
print " ".join([str(ion_col) for ion_col in temp_line]),
print "\n",
if plus2minus:
data_output.append(temp_line+temp_filler)
else:
data_output.append(temp_filler+temp_line)
return data_output
# This simply writes out the data_lines passed in with a simple
# ASCII format. If a filename for output is not specified it
# writes to standard output, otherwise it writes to the file specified.
# The write_mode argument may be used to output a series of trajectories
# to the same output file.
def write_columns(data_lines, outfile=None, write_mode="w"):
if outfile is not None:
fout = open(outfile,write_mode)
for line in data_lines:
if outfile is not None:
fout.write(" ".join([str(col) for col in line])+"\n")
else:
print " ".join([str(col) for col in line])
if outfile is not None:
fout.close()
return True
# This function processes the ion species information from datafiles written
# using "write_merged_coordination" and writes a single long list of the
# same length as filenames with the ion species at each timestep. The
# difference between this function and one like label_columns is that
# it takes raw data as input. Note that this outputs a sorted list.
def species_columns(filenames, resid_col=12, sort_col=3,
all_possible_states=[0,1], max_ions=3,
num_cols=13, remove_frames=0, sf_col=None,
plus2minus=True, add_time=False):
data_output = []
    # This is a list of permutations of all species orderings where a hyphen
# is the lack of an ion. Each one is given a numerical state_id.
# Example for 2 ion occupancy: --, 00, 01, 10, 11, 0-, 1-.
all_species_orders = ["-"*max_ions]
for ion_occ in range(max_ions):
temp_orders = product(all_possible_states, repeat=ion_occ+1)
for order in temp_orders:
order_str = "".join([str(col) for col in order])
for filler in range(max_ions-len(order_str)):
order_str += "-"
all_species_orders.append(order_str)
for filename in filenames:
with file_opener(filename) as data_file:
data_raw = data_file.readlines()[remove_frames:]
data_raw_split = [line.strip().split() for line in data_raw]
# Newer versions of my MDAnalysis code don't require adding a time
            # column (in past versions only time[0] contained the timestep)
if add_time:
data_raw_split = add_time_column(data_raw_split, num_cols=num_cols)
for line in data_raw_split:
chunked_line = chunker(line,num_cols)
# This is created for each line because we don't know
# if the ion is necessarily bound to the SF.
num_ions = 0
temp_line = []
# Sort by the float of the sort_col column number and take
# the deepest max_ions. Usually the outer ions aren't interesting.
for ion in sorted(chunked_line,
key=lambda col: float(col[sort_col]),
reverse=plus2minus): #[:max_ions]:
                    # This is the secret sauce of this method:
                    # merge_coordination files encode the ion species in the
                    # last digit of the resid column, and this would be lost
                    # when the conversion to integer takes place in the normal
                    # process_input function.
if sf_col != None:
if sum([float(ion[col]) for col in sf_col]) > 0:
num_ions += 1
temp_line.append(ion[resid_col][-1])
else:
num_ions += 1
temp_line.append(ion[resid_col][-1])
                # A little messy, but we want to keep collecting ions while
                # they exist; once we've hit the maximum, we're done.
if num_ions == max_ions:
break
# Pad the rest with with hyphens
for filler in range(max_ions-num_ions):
temp_line.append("-")
temp_str = "".join(temp_line)
data_output.append((temp_str, all_species_orders.index(temp_str)))
return data_output
# This function preprocesses raw input and returns the sorted ion data,
# or a sorted/padded list of lists if padded=True. float_cols is a list
# of column numbers that will be converted to floats, where the rest of the
# data will be converted to integers.
def process_input(filenames, sort_col=3, num_cols=13,
remove_frames=0, max_ions=3, traj_col=11,
add_time=False, time_increment=1,
padded=False, float_cols=[1,2,3], time_col=0):
    # This empty list will collect one or multiple files'
    # worth of time series data. All columns must hold numeric data
    # at this stage (later the data is padded with "-" strings).
data_floats = []
# Parse the input file and split and float all the columns.
# This is the required format for all the functions in this
# file.
for filename in filenames:
with file_opener(filename) as data_file:
data_raw = data_file.readlines()[remove_frames:]
data_raw_split = [line.strip().split() for line in data_raw]
# Newer versions of my MDAnalysis code don't require adding a time
            # column (in past versions only time[0] contained the timestep)
if add_time:
data_raw_split = add_time_column(data_raw_split, num_cols=num_cols)
# This solves the problem of the first line not having a traj_num
# by searching the input file, though it's a problem using a merged
# trajectory.
prev_traj = None
for line in data_raw_split:
if (len(line) > traj_col) and (len(line) > time_col):
prev_traj = int(float(line[traj_col]))
assert prev_traj != None, \
"Input file " + filename + " had no traj_id column"
# Loop over the data and convert everything to integer
# except the float_cols columns.
for line_num, line in enumerate(data_raw_split):
temp_line = []
# This frame number will be used when the time column
# is not located.
frame_num = remove_frames+line_num+1
for colindex, colvalue in enumerate(line):
if (float_cols.count(colindex) > 0 or
float_cols.count(colindex % num_cols) > 0):
temp_line.append(float(colvalue))
else:
temp_line.append(int(float(colvalue)))
# This "pads" zero ion columns with a fake ion with
# a timestamp.
if len(temp_line) > 1:
data_floats.append(temp_line)
else:
                # TODO: There's a bug here when prev_traj is
                # detected above as being half-way through a
                # file.
data_floats.append([frame_num] +
[0.,0.,0.] +
["-" for x in range(num_cols-7)] +
[0,prev_traj,0])
# TODO: Write something to remove duplicate lines.
# Since padding normalizes your number of row entries, it's best to
# sort your data a priori in order to capture the inner most ions
# preferentially.
data_floats_sorted = sort_columns(data_floats,
num_cols=num_cols,
sort_col=sort_col)
if padded:
data_floats_padded = pad_columns(data_floats_sorted,
num_cols=num_cols,
max_ions=max_ions,
traj_col=traj_col)
return data_floats_padded
else:
return data_floats_sorted
if __name__ == '__main__':
parser = ArgumentParser(
        description='This script parses input columnar ASCII data\
and makes it nice and pretty for subsequent analysis.')
parser.add_argument(
'-f', dest='filenames', type=str, nargs="+", required=True,
help='a filename of coordination data from MDAnalysis trajectory data')
parser.add_argument(
'-m', dest='max_ions', type=int, required=True,
help='the maximum number of ions in the channel to consider')
parser.add_argument(
'-c', dest='num_cols', type=int, default=13,
help='the number of columns per ion in the input')
parser.add_argument(
'-remove', dest='remove_frames', type=int, default=0,
help='this is a number of frames to remove from the start of the data')
parser.add_argument(
'-s', dest='sort_col', type=int, default=3,
help='a zero inclusive column number to sort your row on, typically x,y,z')
parser.add_argument(
'-t', dest='traj_col', type=int, default=11,
help='a zero inclusive column number that contains the run number')
parser.add_argument(
'-o', dest='outfile', type=str, default=None,
help='the file to output the sorted padding output of all input files')
parser.add_argument(
'--addtime', dest='add_time', action="store_true", default=False,
help='an optional argument to add time columns to each ion grouping')
# The following arguments are used for regex state stream processing
parser.add_argument(
'-i', dest='regex', type=str, nargs="+",
help='a list of regex values in quotes for state stream processing')
parser.add_argument(
'-sc', dest='sort_cut', type=float, default=0.0,
help='a value on the sort_col range to classify zero coordinated data')
parser.add_argument(
'-sf', dest='sf_col', type=int, nargs="+", default=[5,6],
help='the coordination integer columns that define the selectivity filter')
# This is only useful for process_species
parser.add_argument(
'-r', dest='resid_col', type=int, default=12,
help='a zero inclusive column number that contains the ion resid')
args = parser.parse_args()
data_f_padded = process_input(filenames=args.filenames,
num_cols=args.num_cols,
max_ions=args.max_ions,
remove_frames=args.remove_frames,
traj_col=args.traj_col,
sort_col=args.sort_col,
add_time=args.add_time,
padded=True)
data_f_label = label_columns(data_f_padded,
num_cols=args.num_cols,
sort_col=args.sort_col,
sort_cut=args.sort_cut,
sf_col=args.sf_col,
max_ions=args.max_ions)
data_f_regex = regex_columns(data_f_padded, regex_strings=args.regex,
num_cols=args.num_cols,
sort_col=args.sort_col,
sort_cut=args.sort_cut,
sf_col=args.sf_col,
max_ions=args.max_ions)
'''
for x,y in zip(data_f_padded, data_f_regex):
print x,y, len(y[0])
if len(y[0]) > args.max_ions*len(args.sf_col):
raise ValueError("State label is too long, possible bug")
'''
#write_columns(data_f_padded, outfile=args.outfile)
| cing/ChannelAnalysis | ChannelAnalysis/CoordAnalysis/Preprocessor.py | Python | mit | 23,912 | ["MDAnalysis"] | 3d575be0933b0c711b737c4392a0c53d214553f7aed43be6623be4c97ad27479 |
# definitions
def strip_chars(inputString):
WANTED = '*'
guardCnt = 1
while (True):
try:
index = inputString.rindex(WANTED)
except ValueError:
break
inputString = remove_chars(inputString, index)
guardCnt += 1
if (guardCnt == 3):
break
return inputString
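# e.g. strip_chars("adf*lp") -> "adp": each '*' is removed together with its
# immediate neighbours, and the guard counter caps processing at two stars
# per input, which matches the challenge cases below. (Added note.)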
def remove_chars(inputString, index):
# char at the end
if (is_last_char(inputString, index)):
inputString = remove_last(inputString)
if (len(inputString) > 0):
inputString = remove_last(inputString)
# char at the beginning
if (is_first_char(inputString, index)):
inputString = remove_first(inputString)
if (len(inputString) > 0):
inputString = remove_first(inputString)
# char is in the middle
if (is_middle_char(inputString, index)):
inputString = remove_at(inputString, index+1)
inputString = remove_at(inputString, index)
inputString = remove_at(inputString, index-1)
return inputString
def is_first_char(inputString, index):
return index == 0
def is_last_char(inputString, index):
return (index == len(inputString)-1)
def is_middle_char(inputString, index):
return (index > 0 and index < len(inputString)-1)
def remove_last(inputString):
return inputString[0:len(inputString)-1]
def remove_first(inputString):
return inputString[1:len(inputString)]
def remove_at(inputString, index):
# print "inp: ", inputString, " (", index, ")"
# print "ret: ", inputString[0:index] + "_" + inputString[index+1:len(inputString)]
return inputString[0:index] + inputString[index+1:len(inputString)]
def show_result_message(inputString, output, expected):
print "-------------------------"
print OKGREEN+"PASS"+ENDC if output == expected else FAIL+"FAIL"+ENDC,
print "input:", "'"+inputString+"'",
print "output:", "'"+output+"'",
print "expected:", "'"+expected+"'"
# main logic
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
inputs = ["adf*lp", "a*o", "*dech*", "de**po", "sa*n*ti", "abc"]
expecteds = ["adp", "", "ec", "do", "si", "abc"]
for i in range(0, len(inputs)):
output = strip_chars(inputs[i])
show_result_message(inputs[i], output, expecteds[i])
| kszynter/DailyChallenges | Challenge111_easy_star-delete/solution_brute_force.py | Python | mit | 2,305 | ["ADF"] | 9f90f84a4b4d23bf584f032529c61f12d15732da1a00aed21c576a395d933db2 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import os
from pathlib import Path
from c7n_terraform.parser import TerraformVisitor, Parser, VariableResolver
data_dir = Path(__file__).parent / "data"
def setup_tf(tmp_path, file_map=None):
file_map = file_map or {}
for k, v in file_map.items():
with open(os.path.join(tmp_path, k), "w") as fh:
fh.write(v)
data = Parser().parse_module(Path(str(tmp_path)))
visitor = TerraformVisitor(data, tmp_path)
visitor.visit()
resolver = VariableResolver(visitor)
resolver.resolve()
return visitor
def build_visitor(path, resolver=VariableResolver):
data = Parser().parse_module(path)
visitor = TerraformVisitor(data, path)
visitor.visit()
if resolver:
r = resolver(visitor)
r.resolve()
return visitor
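# Added usage sketch ("vpc-module" is a hypothetical fixture directory, not
# one verified to exist under data_dir):
#   visitor = build_visitor(data_dir / "vpc-module")
# parses that Terraform module and resolves its variables; the parsed blocks
# can then be inspected on the returned visitor.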
| thisisshi/cloud-custodian | tools/c7n_terraform/tests_terraform/tf_common.py | Python | apache-2.0 | 871 | ["VisIt"] | dbe53478e15ff3ddc5f7f5984bf74517243a261ce6303b00b5f9b3f95f36c6ec |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
from pymatgen.io.qchem.sets import *
from pymatgen.util.testing import PymatgenTest
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")
class QChemDictSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31G*",
scf_algorithm="diis",
)
self.assertEqual(
test_DictSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "6-31g*",
"max_scf_cycles": "200",
"method": "wb97xv",
"scf_algorithm": "diis",
"xc_grid": "3",
"geom_opt_max_cycles": "200",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_DictSet.pcm, {})
self.assertEqual(test_DictSet.solvent, {})
self.assertEqual(test_DictSet.smx, {})
self.assertEqual(test_DictSet.molecule, test_molecule)
def test_full_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31g*",
scf_algorithm="diis",
dft_rung=1,
pcm_dielectric=10.0,
max_scf_cycles=35,
)
self.assertEqual(
test_DictSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "6-31g*",
"max_scf_cycles": "35",
"method": "b3lyp",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(
test_DictSet.pcm,
{
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1",
},
)
self.assertEqual(test_DictSet.solvent, {"dielectric": 10.0})
self.assertEqual(test_DictSet.molecule, test_molecule)
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31g*",
scf_algorithm="diis",
dft_rung=1,
smd_solvent="water",
max_scf_cycles=35,
)
self.assertEqual(
test_DictSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "6-31g*",
"max_scf_cycles": "35",
"method": "b3lyp",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_DictSet.smx, {"solvent": "water"})
def test_overwrite_input(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
overwrite_inputs = {
"rem": {
"method": "b3lyp",
"basis": "6-31g*",
"thresh": 10,
"xc_grid": "000150000302",
}
}
test_OptSet = OptSet(molecule=test_molecule, overwrite_inputs=overwrite_inputs)
act_rem = {
"job_type": "opt",
"gen_scfman": "true",
"basis": "6-31g*",
"max_scf_cycles": "200",
"method": "b3lyp",
"scf_algorithm": "diis",
"xc_grid": "000150000302",
"geom_opt_max_cycles": "200",
"thresh": 10,
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
}
self.assertDictEqual(act_rem, test_OptSet.rem)
    def test_double_solvation(self):
        test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
        # Requesting PCM and SMD solvation at the same time is contradictory,
        # so constructing the set must raise a ValueError.
        with self.assertRaises(ValueError):
            QChemDictSet(
                molecule=test_molecule,
                job_type="opt",
                basis_set="6-31g*",
                scf_algorithm="diis",
                dft_rung=1,
                pcm_dielectric=10.0,
                smd_solvent="water",
                max_scf_cycles=35,
            )
def test_pcm_write(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31g*",
scf_algorithm="diis",
dft_rung=5,
pcm_dielectric=10.0,
max_scf_cycles=35,
)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": "35",
"method": "wb97mv",
"geom_opt_max_cycles": "200",
"gen_scfman": "true",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
}
pcm = {
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1",
}
qc_input = QCInput(molecule=test_molecule, rem=rem, pcm=pcm, solvent={"dielectric": "10.0"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
def test_smd_write(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31g*",
scf_algorithm="diis",
dft_rung=5,
smd_solvent="water",
max_scf_cycles=35,
)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": "35",
"method": "wb97mv",
"geom_opt_max_cycles": "200",
"gen_scfman": "true",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
}
qc_input = QCInput(molecule=test_molecule, rem=rem, smx={"solvent": "water"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
def test_custom_smd_write(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(
molecule=test_molecule,
job_type="opt",
basis_set="6-31g*",
scf_algorithm="diis",
dft_rung=5,
smd_solvent="custom",
custom_smd="90.00,1.415,0.00,0.735,20.2,0.00,0.00",
max_scf_cycles=35,
)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": "35",
"method": "wb97mv",
"geom_opt_max_cycles": "200",
"gen_scfman": "true",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
}
qc_input = QCInput(molecule=test_molecule, rem=rem, smx={"solvent": "other"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
with open("solvent_data") as sd:
lines = sd.readlines()
self.assertEqual(lines[0], "90.00,1.415,0.00,0.735,20.2,0.00,0.00")
os.remove("solvent_data")
class SinglePointSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule)
self.assertEqual(
test_SPSet.rem,
{
"job_type": "sp",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_SPSet.pcm, {})
self.assertEqual(test_SPSet.solvent, {})
self.assertEqual(test_SPSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_SPSet.rem,
{
"job_type": "sp",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(
test_SPSet.pcm,
{
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1",
},
)
self.assertEqual(test_SPSet.solvent, {"dielectric": 10.0})
self.assertEqual(test_SPSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule, smd_solvent="water")
self.assertEqual(
test_SPSet.rem,
{
"job_type": "sp",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_SPSet.smx, {"solvent": "water"})
self.assertEqual(test_SPSet.molecule, test_molecule)
def test_plots_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule, smd_solvent="water", plot_cubes=True)
self.assertEqual(
test_SPSet.rem,
{
"job_type": "sp",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
"plots": "true",
"make_cube_files": "true",
},
)
self.assertEqual(test_SPSet.plots, {"grid_spacing": "0.05", "total_density": "0"})
self.assertEqual(test_SPSet.smx, {"solvent": "water"})
self.assertEqual(test_SPSet.molecule, test_molecule)
class OptSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule)
self.assertEqual(
test_OptSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"geom_opt_max_cycles": "200",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_OptSet.pcm, {})
self.assertEqual(test_OptSet.solvent, {})
self.assertEqual(test_OptSet.smx, {})
self.assertEqual(test_OptSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_OptSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(
test_OptSet.pcm,
{
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1",
},
)
self.assertEqual(test_OptSet.solvent, {"dielectric": 10.0})
self.assertEqual(test_OptSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule, smd_solvent="water")
self.assertEqual(
test_OptSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_OptSet.smx, {"solvent": "water"})
self.assertEqual(test_OptSet.molecule, test_molecule)
def test_overwrite_opt_input(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
overwrite_inputs = {"opt": {"FIXED": ["1 XYZ", "2 XY"]}}
test_OptSet = OptSet(molecule=test_molecule, overwrite_inputs=overwrite_inputs)
act_opt = {"fixed": ["1 XYZ", "2 XY"]}
self.assertDictEqual(act_opt, test_OptSet.opt)
def test_nbo_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule, nbo_params={})
self.assertEqual(
test_OptSet.rem,
{
"job_type": "opt",
"gen_scfman": "true",
"geom_opt_max_cycles": "200",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
"nbo": "true",
},
)
self.assertEqual(test_OptSet.nbo, {})
self.assertEqual(test_OptSet.molecule, test_molecule)
class TransitionStateSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_TSSet = TransitionStateSet(molecule=test_molecule)
self.assertEqual(
test_TSSet.rem,
{
"job_type": "ts",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"geom_opt_max_cycles": "200",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(test_TSSet.pcm, {})
self.assertEqual(test_TSSet.solvent, {})
self.assertEqual(test_TSSet.smx, {})
self.assertEqual(test_TSSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_TSSet = TransitionStateSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_TSSet.rem,
{
"job_type": "ts",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(
test_TSSet.pcm,
{"heavypoints": "194", "hpoints": "194", "radii": "uff", "theory": "cpcm", "vdwscale": "1.1"},
)
self.assertEqual(test_TSSet.solvent, {"dielectric": 10.0})
self.assertEqual(test_TSSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_TSSet = TransitionStateSet(molecule=test_molecule, smd_solvent="water")
self.assertEqual(
test_TSSet.rem,
{
"job_type": "ts",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"geom_opt_max_cycles": "200",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(test_TSSet.smx, {"solvent": "water"})
self.assertEqual(test_TSSet.molecule, test_molecule)
class ForceSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_forceset = ForceSet(molecule=test_molecule)
self.assertEqual(
test_forceset.rem,
{
"job_type": "force",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_forceset.pcm, {})
self.assertEqual(test_forceset.solvent, {})
self.assertEqual(test_forceset.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_forceset = ForceSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_forceset.rem,
{
"job_type": "force",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(
test_forceset.pcm,
{"heavypoints": "194", "hpoints": "194", "radii": "uff", "theory": "cpcm", "vdwscale": "1.1"},
)
self.assertEqual(test_forceset.solvent, {"dielectric": 10.0})
self.assertEqual(test_forceset.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_forceset = ForceSet(molecule=test_molecule, smd_solvent="water")
self.assertEqual(
test_forceset.rem,
{
"job_type": "force",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_forceset.smx, {"solvent": "water"})
self.assertEqual(test_forceset.molecule, test_molecule)
class PESScanSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pes_scan.qin")).molecule
test_pes_scan = PESScanSet(molecule=test_molecule, scan_variables={"stre": ["3 6 1.5 1.9 0.01"]})
self.assertEqual(
test_pes_scan.rem,
{
"job_type": "pes_scan",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"geom_opt_max_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(test_pes_scan.pcm, dict())
self.assertEqual(test_pes_scan.solvent, dict())
self.assertEqual(test_pes_scan.smx, dict())
self.assertEqual(test_pes_scan.scan, {"stre": ["3 6 1.5 1.9 0.01"]})
self.assertEqual(test_pes_scan.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pes_scan.qin")).molecule
test_pes_scan = PESScanSet(
molecule=test_molecule, pcm_dielectric=10.0, scan_variables={"stre": ["3 6 1.5 1.9 0.01"]}
)
self.assertEqual(
test_pes_scan.rem,
{
"job_type": "pes_scan",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"geom_opt_max_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(
test_pes_scan.pcm,
{"heavypoints": "194", "hpoints": "194", "radii": "uff", "theory": "cpcm", "vdwscale": "1.1"},
)
self.assertEqual(test_pes_scan.solvent, {"dielectric": 10.0})
self.assertEqual(test_pes_scan.scan, {"stre": ["3 6 1.5 1.9 0.01"]})
self.assertEqual(test_pes_scan.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pes_scan.qin")).molecule
test_pes_scan = PESScanSet(
molecule=test_molecule, smd_solvent="water", scan_variables={"stre": ["3 6 1.5 1.9 0.01"]}
)
self.assertEqual(
test_pes_scan.rem,
{
"job_type": "pes_scan",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"geom_opt_max_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"resp_charges": "true",
"sym_ignore": "true",
"symmetry": "false",
},
)
self.assertEqual(test_pes_scan.smx, {"solvent": "water"})
self.assertEqual(test_pes_scan.scan, {"stre": ["3 6 1.5 1.9 0.01"]})
self.assertEqual(test_pes_scan.molecule, test_molecule)
class FreqSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule)
self.assertEqual(
test_FreqSet.rem,
{
"job_type": "freq",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_FreqSet.pcm, {})
self.assertEqual(test_FreqSet.solvent, {})
self.assertEqual(test_FreqSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_FreqSet.rem,
{
"job_type": "freq",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "pcm",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(
test_FreqSet.pcm,
{
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1",
},
)
self.assertEqual(test_FreqSet.solvent, {"dielectric": 10.0})
self.assertEqual(test_FreqSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule, smd_solvent="water")
self.assertEqual(
test_FreqSet.rem,
{
"job_type": "freq",
"gen_scfman": "true",
"basis": "def2-tzvppd",
"max_scf_cycles": "200",
"method": "wb97xd",
"scf_algorithm": "diis",
"xc_grid": "3",
"solvent_method": "smd",
"ideriv": "1",
"symmetry": "false",
"sym_ignore": "true",
"resp_charges": "true",
},
)
self.assertEqual(test_FreqSet.smx, {"solvent": "water"})
self.assertEqual(test_FreqSet.molecule, test_molecule)
if __name__ == "__main__":
unittest.main()
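# Added usage sketch (hedged; the water geometry below is made up for
# illustration and is not taken from the test files): writing a Q-Chem
# geometry optimization input with implicit water solvation could look like
#   from pymatgen.core import Molecule
#   mol = Molecule(["O", "H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.96], [0.93, 0.0, -0.24]])
#   OptSet(molecule=mol, smd_solvent="water").write("water_opt.qin")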
| vorwerkc/pymatgen | pymatgen/io/qchem/tests/test_sets.py | Python | mit | 29,555 | ["pymatgen"] | 21134180e328af24bc017f4a937b508978d439c9f67739b7bf4aa80e075892a4 |
import unittest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from meerkat_abacus.config import config
from meerkat_abacus import model
from meerkat_abacus.pipeline_worker.process_steps import to_data_type
from meerkat_abacus.consumer.database_setup import create_db
class TestToDataType(unittest.TestCase):
def setUp(self):
create_db(config.DATABASE_URL, drop=True)
engine = create_engine(config.DATABASE_URL)
model.form_tables(config)
model.Base.metadata.create_all(engine)
self.engine = create_engine(config.DATABASE_URL)
Session = sessionmaker(bind=self.engine)
self.session = Session()
def test_to_data_type(self):
tdt = to_data_type.ToDataType(config, self.session)
data_1 = {"form": "demo_case",
"data": {"intro./visit": "new"}}
data_2 = {"form": "demo_case",
"data": {"intro./visit": "return"}}
data_3 = {"form": "demo_alert",
"data": {"intro./visit": "new"}}
data_4 = {"form": "demo_register",
"data": {"intro./visit": "new"}}
data_5 = {"form": "demo_does_not_exisit",
"data": {"intro./visit": "new"}}
result = tdt.run(data_1["form"], data_1["data"])
self.assertEqual(len(result), 2)
types = [d["data"]["type"] for d in result]
self.assertEqual(["Case", "Visit"], sorted(types))
self.assertEqual(result[0]["data"].get("raw_data"), data_1["data"])
result = tdt.run(data_2["form"], data_2["data"])
self.assertEqual(len(result), 1)
types = [d["data"]["type"] for d in result]
self.assertEqual(["Visit"], sorted(types))
result = tdt.run(data_3["form"], data_3["data"])
self.assertEqual(len(result), 1)
types = [d["data"]["type"] for d in result]
self.assertEqual(["Case"], sorted(types))
self.assertEqual(result[0]["data"].get("link_data"),
{"alert_investigation": [data_1["data"]]})
result = tdt.run(data_4["form"], data_4["data"])
self.assertEqual(len(result), 1)
types = [d["data"]["type"] for d in result]
self.assertEqual(["Register"], sorted(types))
result = tdt.run(data_5["form"], data_5["data"])
self.assertEqual(result, [])
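# (Added note: the assertions above pin down the intended mapping -- a "new"
# demo_case submission yields both a Case and a Visit record, a "return"
# submission only a Visit, demo_alert yields a Case carrying link_data, and
# a form unknown to the config yields no records at all.)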
| who-emro/meerkat_abacus | meerkat_abacus/pipeline_worker/tests/test_to_data_type.py | Python | mit | 2,384 | ["VisIt"] | 1931a3021f9ade493a604d73eb89ec0135e56735b5586a3ab778d4b2175dd6f3 |
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# A copy of the GNU Lesser General Public License can be found at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
#       grid, as the field actually used may not be the first in STASH order
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1950-2020/0.5x0.5/combined_sources_BC_biofuel_1950-2020.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i311: Black carbon biofuel surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i311'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='BC_biofuel'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
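# Optional sanity check (a sketch, not part of the original workflow):
# area-weighted regridding should approximately conserve the global flux,
# so the area-weighted totals before and after regridding should agree.
#import iris.analysis.cartography
#w_in = iris.analysis.cartography.area_weights(nems)
#w_out = iris.analysis.cartography.area_weights(ocube)
#print (nems.data * w_in).sum(), (ocube.data * w_out).sum()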
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+species_name.strip()
ocube.long_name='BC biofuel surface emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=species_name.strip()
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_BC_biofuel_1950-2020.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of black carbon from 1950 to 2020 (from selected anthropogenic biofuel sources only)'
ocube.attributes['File_version']='v3'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1950-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ])
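# NOTE: the 852 mid-month points above (71 years x 12 months on a 360-day
# calendar) follow a regular pattern, so an equivalent and less error-prone
# construction would be:
#ocube.coord(axis='t').points=numpy.arange(852)*30+15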
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ], dtype='float64')
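# as above, this array is identical to the time points and could equally be
# generated as numpy.arange(852, dtype='float64')*30.0+15.0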
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# we don't want time to be an unlimited (concatenatable) dimension, as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1950-2020/regrid_BC_biofuel_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 19,110
|
[
"NetCDF"
] |
bfe39d05ba1d4f64651855fb86f5c29abc50da7d903d3fad183bbcf9ae60e0ef
|
"""Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import contextlib
import csv
import os
import glob
import shutil
import subprocess
import pandas as pd
import lxml.html
import yaml
from datetime import datetime
from collections import defaultdict
# allow graceful degradation during upgrades when matplotlib is unavailable
try:
import matplotlib
matplotlib.use('Agg', force=True)
import matplotlib.pyplot as plt
plt.ioff()
except ImportError:
plt = None
try:
from fadapa import Fadapa
except ImportError:
Fadapa = None
import pybedtools
import pysam
import toolz as tz
import toolz.dicttoolz as dtz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
from bcbio.install import _get_data_dir
from bcbio.provenance import do
import bcbio.rnaseq.qc
import bcbio.pipeline.datadict as dd
from bcbio.variation import bedutils
from bcbio import broad
from bcbio.variation import coverage_experimental as cov
from bcbio.variation.coverage import decorate_problem_regions
from bcbio.ngsalign.postalign import dedup_bam
# ## High level functions to generate summary
def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
sum_samples = run_parallel("pipeline_summary", samples)
samples_coverage = report_summary(sum_samples, run_parallel)
qsign_info = run_parallel("qsignature_summary", [sum_samples])
summary_file = write_project_summary(samples_coverage, qsign_info)
samples = []
for data in sum_samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
samples.append(data)
samples = _add_researcher_summary(samples, summary_file)
return samples
def pipeline_summary(data):
"""Provide summary information on processing sample.
"""
work_bam = data.get("work_bam")
if data["sam_ref"] is not None and work_bam and work_bam.endswith(".bam"):
logger.info("Generating summary files: %s" % str(data["name"]))
data["summary"] = _run_qc_tools(work_bam, data)
elif data["analysis"].lower().startswith("smallrna-seq"):
work_bam = data["clean_fastq"]
data["summary"] = _run_qc_tools(work_bam, data)
return [[data]]
def prep_pdf(qc_dir, config):
"""Create PDF from HTML summary outputs in QC directory.
    Requires wkhtmltopdf installed: http://wkhtmltopdf.org
Thanks to: https://www.biostars.org/p/16991/
Works around issues with CSS conversion on CentOS by adjusting CSS.
"""
html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
try:
topdf = config_utils.get_program("wkhtmltopdf", config)
except config_utils.CmdNotFound:
topdf = None
if topdf and utils.file_exists(html_file):
out_file = "%s.pdf" % os.path.splitext(html_file)[0]
if not utils.file_exists(out_file):
cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
% (html_file, html_fixed))
do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
cmd = [topdf, html_fixed, out_file]
do.run(cmd, "Convert QC HTML to PDF")
return out_file
def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
    to_run = []
if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []):
to_run.append(("fastqc", _run_fastqc))
if data["analysis"].lower().startswith("rna-seq"):
to_run.append(("bamtools", _run_bamtools_stats))
to_run.append(("qualimap", _rnaseq_qualimap))
elif data["analysis"].lower().startswith("chip-seq"):
to_run.append(["bamtools", _run_bamtools_stats])
elif not data["analysis"].lower().startswith("smallrna-seq"):
to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)]
if data["analysis"].lower().startswith(("standard", "variant2")):
to_run.append(["qsignature", _run_qsignature_generator])
if "qualimap" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
to_run.append(("qualimap", _run_qualimap))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
for program_name, qc_fn in to_run:
cur_qc_dir = os.path.join(qc_dir, program_name)
cur_metrics = qc_fn(bam_file, data, cur_qc_dir)
metrics.update(cur_metrics)
if data['config']["algorithm"].get("kraken", None):
if data["analysis"].lower().startswith("smallrna-seq"):
logger.info("Kraken is not compatible with srnaseq pipeline yet.")
else:
ratio = bam.get_aligned_reads(bam_file, data)
cur_metrics = _run_kraken(data, ratio)
metrics.update(cur_metrics)
bam.remove("%s-downsample%s" % os.path.splitext(bam_file))
metrics["Name"] = data["name"][-1]
metrics["Quality format"] = utils.get_in(data,
("config", "algorithm",
"quality_format"),
"standard").lower()
return {"qc": qc_dir, "metrics": metrics}
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
"""Write project summary information on the provided samples.
    Writes out dirs, genome resources and per-sample summary metrics.
"""
work_dir = samples[0][0]["dirs"]["work"]
out_file = os.path.join(work_dir, "project-summary.yaml")
upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
if "dir" in samples[0][0]["upload"] else "")
date = str(datetime.now())
prev_samples = _other_pipeline_samples(out_file, samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump({"date": date}, out_handle,
default_flow_style=False, allow_unicode=False)
if qsign_info:
qsign_out = utils.deepish_copy(qsign_info[0])
qsign_out.pop("out_dir", None)
yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
allow_unicode=False)
yaml.safe_dump({"upload": upload_dir}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
default_flow_style=False, allow_unicode=False)
return out_file
def _other_pipeline_samples(summary_file, cur_samples):
"""Retrieve samples produced previously by another pipeline in the summary output.
"""
cur_descriptions = set([s[0]["description"] for s in cur_samples])
out = []
if os.path.exists(summary_file):
with open(summary_file) as in_handle:
            for s in yaml.safe_load(in_handle).get("samples", []):
if s["description"] not in cur_descriptions:
out.append(s)
return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return saved
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
"""Generate summary files per researcher if organized via a LIMS.
"""
by_researcher = collections.defaultdict(list)
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
by_researcher[researcher].append(data["description"])
out_by_researcher = {}
for researcher, descrs in by_researcher.items():
out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
set(descrs), samples[0][0])
out = []
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
data["summary"]["researcher"] = out_by_researcher[researcher]
out.append([data])
return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
"""Generate a CSV file with summary information for a researcher on this project.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
"%s-summary.tsv" % run_info.clean_name(researcher))
metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"]
with open(summary_yaml) as in_handle:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(["Name"] + metrics)
for sample in yaml.safe_load(in_handle)["samples"]:
if sample["description"] in descrs:
row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
for x in metrics]
writer.writerow(row)
return out_file
# ## Run and parse read information from FastQC
class FastQCParser:
def __init__(self, base_dir, sample=None):
self._dir = base_dir
self.sample = sample
def get_fastqc_summary(self):
ignore = set(["Total Sequences", "Filtered Sequences",
"Filename", "File type", "Encoding"])
stats = {}
for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
k, v = stat_line.split("\t")[:2]
if k not in ignore:
stats[k] = v
return stats
def _fastqc_data_section(self, section_name):
out = []
in_section = False
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file):
with open(data_file) as in_handle:
for line in in_handle:
if line.startswith(">>%s" % section_name):
in_section = True
elif in_section:
if line.startswith(">>END"):
break
out.append(line.rstrip("\r\n"))
return out
def save_sections_into_file(self):
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file) and Fadapa:
parser = Fadapa(data_file)
module = [m[1] for m in parser.summary()][2:9]
for m in module:
out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv")
dt = self._get_module(parser, m)
dt.to_csv(out_file, sep="\t", index=False)
def _get_module(self, parser, module):
"""
Get module using fadapa package
"""
dt = []
lines = parser.clean_data(module)
header = lines[0]
for data in lines[1:]:
if data[0].startswith("#"): #some modules have two headers
header = data
continue
if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3
f, s = map(int, data[0].split("-"))
                for pos in range(f, s + 1):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = self.sample
return dt
def _run_gene_coverage(bam_file, data, out_dir):
out_file = os.path.join(out_dir, "gene_coverage.pdf")
ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts"))
count_file = data["count_file"]
    if utils.file_exists(out_file):
        return {"gene_coverage": out_file}
with file_transaction(data, out_file) as tx_out_file:
plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file)
return {"gene_coverage": out_file}
def _run_kraken(data, ratio):
"""Run kraken, generating report in specified directory and parsing metrics.
    Uses only the first read of each pair.
"""
# logger.info("Number of aligned reads < than 0.60 in %s: %s" % (str(data["name"]), ratio))
logger.info("Running kraken to determine contaminant: %s" % str(data["name"]))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
kraken_out = os.path.join(qc_dir, "kraken")
out = out_stats = None
db = data['config']["algorithm"]["kraken"]
kraken_cmd = config_utils.get_program("kraken", data["config"])
if db == "minikraken":
db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken")
if not os.path.exists(db):
logger.info("kraken: no database found %s, skipping" % db)
return {"kraken_report": "null"}
if not os.path.exists(os.path.join(kraken_out, "kraken_out")):
work_dir = os.path.dirname(kraken_out)
utils.safe_makedir(work_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
fn_file = data["files"][0]
if fn_file.endswith("bam"):
logger.info("kraken: need fasta files as input")
return {"kraken_report": "null"}
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
out = os.path.join(tx_tmp_dir, "kraken_out")
out_stats = os.path.join(tx_tmp_dir, "kraken_stats")
cat = "zcat" if fn_file.endswith(".gz") else "cat"
cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick "
"--preload --min-hits 2 "
"--threads {num_cores} "
"--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals())
do.run(cl, "kraken: %s" % data["name"][-1])
if os.path.exists(kraken_out):
shutil.rmtree(kraken_out)
shutil.move(tx_tmp_dir, kraken_out)
metrics = _parse_kraken_output(kraken_out, db, data)
return metrics
def _parse_kraken_output(out_dir, db, data):
"""Parse kraken stat info comming from stderr,
generating report with kraken-report
"""
in_file = os.path.join(out_dir, "kraken_out")
stat_file = os.path.join(out_dir, "kraken_stats")
out_file = os.path.join(out_dir, "kraken_summary")
kraken_cmd = config_utils.get_program("kraken-report", data["config"])
classify = unclassify = None
with open(stat_file, 'r') as handle:
for line in handle:
if line.find(" classified") > -1:
classify = line[line.find("(") + 1:line.find(")")]
if line.find(" unclassified") > -1:
unclassify = line[line.find("(") + 1:line.find(")")]
if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
do.run(cl, "kraken report: %s" % data["name"][-1])
kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
kraken_sum = _summarize_kraken(out_file)
kraken.update(kraken_sum)
return kraken
def _summarize_kraken(fn):
"""get the value at species level"""
kraken = {}
list_sp, list_value = [], []
with open(fn) as handle:
for line in handle:
cols = line.strip().split("\t")
sp = cols[5].strip()
if len(sp.split(" ")) > 1 and not sp.startswith("cellular"):
list_sp.append(sp)
list_value.append(cols[0])
kraken = {"kraken_sp": list_sp, "kraken_value": list_value}
return kraken
def _run_fastqc(bam_file, data, fastqc_out):
"""Run fastqc, generating report in specified directory and parsing metrics.
Downsamples to 10 million reads to avoid excessive processing times with large
files, unless we're running a Standard/smallRNA-seq/QC pipeline.
    Handles fastqc 0.11+, which uses a single HTML file, and older versions that use
    a directory of files + images. The goal is to eventually move to only 0.11+.
"""
sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
if not os.path.exists(sentry_file):
work_dir = os.path.dirname(fastqc_out)
utils.safe_makedir(work_dir)
ds_bam = (bam.downsample(bam_file, data, 1e7)
if data.get("analysis", "").lower() not in ["standard", "smallrna-seq"]
else None)
bam_file = ds_bam if ds_bam else bam_file
frmt = "bam" if bam_file.endswith("bam") else "fastq"
fastqc_name = utils.splitext_plus(os.path.basename(bam_file))[0]
fastqc_clean_name = dd.get_sample_name(data)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
cl = [config_utils.get_program("fastqc", data["config"]),
"-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", frmt, bam_file]
do.run(cl, "FastQC: %s" % data["name"][-1])
tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
utils.safe_makedir(fastqc_out)
# Use sample name for reports instead of bam file name
with open(os.path.join(tx_fastqc_out, "fastqc_data.txt"), 'r') as fastqc_bam_name, \
open(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), 'w') as fastqc_sample_name:
for line in fastqc_bam_name:
fastqc_sample_name.write(line.replace(os.path.basename(bam_file), fastqc_clean_name))
shutil.move(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), os.path.join(fastqc_out, 'fastqc_data.txt'))
shutil.move(tx_combo_file, sentry_file)
if os.path.exists("%s.zip" % tx_fastqc_out):
shutil.move("%s.zip" % tx_fastqc_out, os.path.join(fastqc_out, "%s.zip" % fastqc_clean_name))
elif not os.path.exists(sentry_file):
if os.path.exists(fastqc_out):
shutil.rmtree(fastqc_out)
shutil.move(tx_fastqc_out, fastqc_out)
parser = FastQCParser(fastqc_out, data["name"][-1])
stats = parser.get_fastqc_summary()
parser.save_sections_into_file()
return stats
def _run_complexity(bam_file, data, out_dir):
try:
import pandas as pd
import statsmodels.formula.api as sm
except ImportError:
return {"Unique Starts Per Read": "NA"}
SAMPLE_SIZE = 1000000
base, _ = os.path.splitext(os.path.basename(bam_file))
utils.safe_makedir(out_dir)
out_file = os.path.join(out_dir, base + ".pdf")
df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tmp_out_file:
df.plot(x='reads', y='starts', title=bam_file + " complexity")
fig = plt.gcf()
fig.savefig(tmp_out_file)
print "file saved as", out_file
print "out_dir is", out_dir
return bcbio.rnaseq.qc.estimate_library_complexity(df)
# ## Qualimap
def _parse_num_pct(k, v):
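    # e.g. _parse_num_pct("Mapped reads", "49,836 / 92.1%") (illustrative
    # values) returns {"Mapped reads": "49836", "Mapped reads pct": "92.1%"}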
num, pct = v.split(" / ")
return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()}
def _parse_qualimap_globals(table):
"""Retrieve metrics of interest from globals table.
"""
out = {}
want = {"Mapped reads": _parse_num_pct,
"Duplication rate": lambda k, v: {k: v}}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col in want:
out.update(want[col](col, val))
return out
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out
def _parse_qualimap_insertsize(table):
"""Parse insert size metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Median":
out["Insert size (Median)"] = val
return out
def _parse_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = {"Globals": _parse_qualimap_globals,
"Globals (inside of regions)": _parse_qualimap_globals_inregion,
"Coverage": _parse_qualimap_coverage,
"Coverage (inside of regions)": _parse_qualimap_coverage,
"Insert size": _parse_qualimap_insertsize,
"Insert size (inside of regions)": _parse_qualimap_insertsize}
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(parsers[header](table))
new_names = []
for metric in out:
new_names.append(metric + "_qualimap_1e7reads_est")
out = dict(zip(new_names, out.values()))
return out
def _bed_to_bed6(orig_file, out_dir):
"""Convert bed to required bed6 inputs.
"""
bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
if not utils.file_exists(bed6_file):
with open(bed6_file, "w") as out_handle:
for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
region = [x for x in list(region) if x]
fillers = [str(i), "1.0", "+"]
full = region + fillers[:6 - len(region)]
out_handle.write("\t".join(full) + "\n")
return bed6_file
def _run_qualimap(bam_file, data, out_dir):
"""Run qualimap to assess alignment quality metrics.
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
if not os.path.exists(report_file):
ds_bam = bam.downsample(bam_file, data, 1e7)
bam_file = ds_bam if ds_bam else bam_file
utils.safe_makedir(out_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
qualimap = config_utils.get_program("qualimap", data["config"])
resources = config_utils.get_resources("qualimap", data["config"])
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} "
"-nt {num_cores} --java-mem-size={max_mem}")
species = data["genome_resources"]["aliases"].get("ensembl", "").upper()
if species in ["HUMAN", "MOUSE"]:
cmd += " -gd {species}"
regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
if regions:
bed6_regions = _bed_to_bed6(regions, out_dir)
cmd += " -gff {bed6_regions}"
do.run(cmd.format(**locals()), "Qualimap: %s" % data["name"][-1])
return _parse_qualimap_metrics(report_file)
# ## RNAseq Qualimap
def _parse_metrics(metrics):
# skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)
missing = set(["Genes Detected", "Transcripts Detected",
"Mean Per Base Cov."])
correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
"Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
"Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
"No feature assigned": 0, "Duplication Rate of Mapped": 1,
"Fragment Length Mean": 1,
"rRNA": 1, "Ambiguou alignment": 0})
total = ["Not aligned", "Aligned to genes", "No feature assigned"]
out = {}
total_reads = sum([int(metrics[name]) for name in total])
out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
[out.update({name: 0}) for name in missing]
[metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]
for name in to_change:
if not to_change[name]:
continue
try:
if to_change[name] == 1:
out.update({name: float(metrics[name])})
else:
out.update({to_change[name]: float(metrics[name])})
# if we can't convert metrics[name] to float (?'s or other non-floats)
except ValueError:
continue
return out
def _detect_duplicates(bam_file, out_dir, data):
"""
count duplicate percentage
"""
out_file = os.path.join(out_dir, "dup_metrics.txt")
if not utils.file_exists(out_file):
dup_align_bam = dedup_bam(bam_file, data)
num_cores = dd.get_num_cores(data)
with file_transaction(out_file) as tx_out_file:
sambamba = config_utils.get_program("sambamba", data, default="sambamba")
dup_count = ("{sambamba} view --nthreads {num_cores} --count "
"-F 'duplicate and not unmapped' "
"{bam_file} >> {tx_out_file}")
message = "Counting duplicates in {bam_file}.".format(bam_file=bam_file)
do.run(dup_count.format(**locals()), message)
tot_count = ("{sambamba} view --nthreads {num_cores} --count "
"-F 'not unmapped' "
"{bam_file} >> {tx_out_file}")
message = "Counting reads in {bam_file}.".format(bam_file=bam_file)
do.run(tot_count.format(**locals()), message)
with open(out_file) as in_handle:
dupes = float(in_handle.next().strip())
total = float(in_handle.next().strip())
return {"Duplication Rate of Mapped": dupes / total}
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
def _detect_rRNA(config, bam_file, rRNA_file, ref_file, out_dir, single_end):
"""
Calculate rRNA with gatk-framework
"""
if not utils.file_exists(rRNA_file):
return {'rRNA': 0}
out_file = os.path.join(out_dir, "rRNA.counts")
if not utils.file_exists(out_file):
out_file = _count_rRNA_reads(bam_file, out_file, ref_file, rRNA_file, single_end, config)
with open(out_file) as in_handle:
for line in in_handle:
if line.find("CountReads counted") > -1:
rRNA_reads = line.split()[6]
break
return {'rRNA': rRNA_reads}
def _count_rRNA_reads(in_bam, out_file, ref_file, rRNA_interval, single_end, config):
"""Use GATK counter to count reads in rRNA genes
"""
bam.index(in_bam, config)
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
rRNA_coor = os.path.join(os.path.dirname(out_file), "rRNA.list")
_transform_browser_coor(rRNA_interval, rRNA_coor)
params = ["-T", "CountReads",
"-R", ref_file,
"-I", in_bam,
"-log", tx_out_file,
"-L", rRNA_coor,
"--filter_reads_with_N_cigar",
"-allowPotentiallyMisencodedQuals"]
jvm_opts = broad.get_gatk_framework_opts(config)
cmd = [config_utils.get_program("gatk-framework", config)] + jvm_opts + params
do.run(cmd, "counts rRNA for %s" % in_bam)
return out_file
def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
col = col.replace(":", "").strip()
val = val.replace(",", "")
m = {col: val}
if val.find("/") > -1:
m = _parse_num_pct(col, val.replace("%", ""))
out.update(m)
return out
def _parse_rnaseq_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"]
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(_parse_qualimap_rnaseq(table))
return out
def _rnaseq_qualimap(bam_file, data, out_dir):
"""
Run qualimap for a rnaseq bam file and parse results
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
config = data["config"]
gtf_file = dd.get_gtf_file(data)
ref_file = dd.get_ref_file(data)
single_end = not bam.is_paired(bam_file)
if not utils.file_exists(report_file):
utils.safe_makedir(out_dir)
bam.index(bam_file, config)
cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end)
do.run(cmd, "Qualimap for {}".format(data["name"][-1]))
metrics = _parse_rnaseq_qualimap_metrics(report_file)
metrics.update(_detect_duplicates(bam_file, out_dir, data))
metrics.update(_detect_rRNA(config, bam_file, gtf_file, ref_file, out_dir, single_end))
metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)})
metrics = _parse_metrics(metrics)
return metrics
def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None):
"""
Create command lines for qualimap
"""
qualimap = config_utils.get_program("qualimap", config)
resources = config_utils.get_resources("qualimap", config)
num_cores = resources.get("cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "4G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional -bam {bam_file} "
"-gtf {gtf_file} --java-mem-size={max_mem}").format(**locals())
return cmd
# ## Lightweight QC approaches
def _parse_bamtools_stats(stats_file):
out = {}
want = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"])
with open(stats_file) as in_handle:
for line in in_handle:
parts = line.split(":")
if len(parts) == 2:
metric, stat_str = parts
metric = metric.split("(")[0].strip()
if metric in want:
stat_parts = stat_str.split()
if len(stat_parts) == 2:
stat, pct = stat_parts
pct = pct.replace("(", "").replace(")", "")
else:
stat = stat_parts[0]
pct = None
out[metric] = stat
if pct:
out["%s pct" % metric] = pct
return out
def _parse_offtargets(bam_file):
"""
Add to metrics off-targets reads if it exitst
"""
off_target = bam_file.replace(".bam", "-offtarget-stats.yaml")
if os.path.exists(off_target):
        with open(off_target) as in_handle:
            res = yaml.safe_load(in_handle)
res['offtarget_pct'] = "%.3f" % (float(res['offtarget']) / float(res['mapped']))
return res
return {}
def _run_bamtools_stats(bam_file, data, out_dir):
"""Run bamtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file = os.path.join(out_dir, "bamtools_stats.txt")
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
bamtools = config_utils.get_program("bamtools", data["config"])
with file_transaction(data, stats_file) as tx_out_file:
cmd = "{bamtools} stats -in {bam_file}"
if bam.is_paired(bam_file):
cmd += " -insert"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "bamtools stats", data)
out = _parse_bamtools_stats(stats_file)
out.update(_parse_offtargets(bam_file))
return out
## Variant statistics from gemini
def _run_gemini_stats(bam_file, data, out_dir):
"""Retrieve high level variant statistics from Gemini.
"""
out = {}
gemini_dbs = [d for d in
[tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d]
if len(gemini_dbs) > 0:
gemini_db = gemini_dbs[0]
gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0]
if not utils.file_uptodate(gemini_stat_file, gemini_db):
gemini = config_utils.get_program("gemini", data["config"])
tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db])
gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db])
dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q",
"SELECT count(*) FROM variants WHERE in_dbsnp==1"])
out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1]
for line in gt_counts.split("\n"):
parts = line.rstrip().split()
if len(parts) > 0 and parts[0] != "sample":
name, hom_ref, het, hom_var, _, total = parts
out[name] = {}
out[name]["Variations (heterozygous)"] = int(het)
out[name]["Variations (homozygous)"] = int(hom_var)
# same total variations for all samples, keep that top level as well.
out["Variations (total)"] = int(total)
out["Variations (in dbSNP)"] = int(dbsnp_count.strip())
if out.get("Variations (total)") > 0:
out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] /
float(out["Variations (total)"]) * 100.0)
with open(gemini_stat_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
else:
with open(gemini_stat_file) as in_handle:
out = yaml.safe_load(in_handle)
res = {}
for k, v in out.iteritems():
if not isinstance(v, dict):
res.update({k: v})
if k == data['name'][-1]:
res.update(v)
return res
## qsignature
def _run_qsignature_generator(bam_file, data, out_dir):
""" Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
:param data: (list) list containing the all the dictionary
for this sample
:param out_dir: (str) path of the output
:returns: (dict) dict with the normalize vcf file
"""
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in(['genome_build'], data))
return {}
jvm_opts = "-Xms750m -Xmx2g"
limit_reads = 20000000
if mixup_check == "qsignature_full":
slice_bam = bam_file
jvm_opts = "-Xms750m -Xmx8g"
limit_reads = 100000000
else:
slice_bam = _slice_chr22(bam_file, data)
qsig = config_utils.get_program("qsignature", data["config"])
if not qsig:
return {}
utils.safe_makedir(out_dir)
out_name = os.path.basename(slice_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_file} ")
if not os.path.exists(out_file):
down_file = bam.downsample(slice_bam, data, limit_reads)
if not down_file:
down_file = slice_bam
file_qsign_out = "{0}.qsig.vcf".format(down_file)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % data["name"][-1])
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return {'qsig_vcf': out_file}
return {}
def qsignature_summary(*samples):
"""Run SignatureCompareRelatedSimple module from qsignature tool.
Creates a matrix of pairwise comparison among samples. The
function will not run if the output exists
:param samples: list with only one element containing all samples information
:returns: (dict) with the path of the output to be joined to summary
"""
warnings, similar = [], []
qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
if not qsig:
return [[]]
jvm_opts = "-Xms750m -Xmx8g"
work_dir = samples[0][0]["dirs"]["work"]
count = 0
for data in samples:
data = data[0]
vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data)
if vcf:
count += 1
vcf_name = data["name"][-1] + ".qsig.vcf"
out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
if not os.path.lexists(os.path.join(out_dir, vcf_name)):
os.symlink(vcf, os.path.join(out_dir, vcf_name))
if count > 0:
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
out_file = os.path.join(qc_out_dir, "qsignature.xml")
out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
if not os.path.exists(out_file):
with file_transaction(samples[0][0], out_file) as file_txt_out:
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureCompareRelatedSimple "
"-log {log} -dir {out_dir} "
"-o {file_txt_out} ")
do.run(base_cmd.format(**locals()), "qsignature score calculation")
error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
out_warn_file, samples[0][0])
return [{'total samples': count,
'similar samples pairs': len(similar),
'warnings samples pairs': len(warnings),
'error samples': list(error),
'out_dir': qc_out_dir}]
else:
return []
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
    :returns: (tuple) error, warning and similar sets of sample pairs
"""
name = {}
error, warnings, similar = set(), set(), set()
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
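    # With the thresholds above, a score of exactly 0 means the reads look
    # like the very same sample (error), a score below the replicate cutoff
    # flags a likely replicate (warning) and a score below the related cutoff
    # flags related samples (note).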
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = lxml.etree.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar
def _slice_chr22(in_bam, data):
"""
    Return a BAM file containing only chromosome 22.
"""
sambamba = config_utils.get_program("sambamba", data["config"])
out_file = "%s-chr%s" % os.path.splitext(in_bam)
if not utils.file_exists(out_file):
bam.index(in_bam, data['config'])
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
chromosome = "22"
if "chr22" in bam_contigs:
chromosome = "chr22"
with file_transaction(data, out_file) as tx_out_file:
cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return out_file
## report and coverage
def report_summary(samples, run_parallel):
"""
Run coverage report with bcbiocov package
"""
work_dir = dd.get_work_dir(samples[0][0])
parent_dir = utils.safe_makedir(os.path.join(work_dir, "report"))
qsignature_fn = os.path.join(work_dir, "qc", "qsignature", "qsignature.ma")
with utils.chdir(parent_dir):
logger.info("copy qsignature")
        if utils.file_exists(qsignature_fn) and not utils.file_exists("qsignature.ma"):
            shutil.copy(qsignature_fn, "qsignature.ma")
out_dir = utils.safe_makedir("fastqc")
logger.info("summarize fastqc")
with utils.chdir(out_dir):
_merge_fastqc(samples)
out_dir = utils.safe_makedir("coverage")
out_dir = utils.safe_makedir("variants")
samples = run_parallel("coverage_report", samples)
try:
import bcbreport.prepare as bcbreport
bcbreport.report(parent_dir)
        except:
            logger.info("skipping report. No bcbreport installed.")
logger.info("summarize metrics")
samples = _merge_metrics(samples)
return samples
def coverage_report(data):
"""
Run heavy coverage and variants process in parallel
"""
data = cov.coverage(data)
data = cov.variants(data)
data = cov.priority_coverage(data)
data = cov.priority_total_coverage(data)
problem_regions = dd.get_problem_region_dir(data)
name = dd.get_sample_name(data)
if "coverage" in data:
coverage = data['coverage']
annotated = None
if problem_regions and coverage:
annotated = decorate_problem_regions(coverage, problem_regions)
data['coverage'] = {'all': coverage, 'problems': annotated}
return [[data]]
def _get_coverage_per_region(name):
"""
Parse coverage file if it exists to get average value.
"""
fn = os.path.join("coverage", name + "_coverage.bed")
if utils.file_exists(fn):
try:
dt = pd.read_csv(fn, sep="\t", index_col=False)
return "%.3f" % (sum(map(float, dt['meanCoverage'])) / len(dt['meanCoverage']))
except TypeError:
logger.debug("%s has no lines in coverage.bed" % name)
return "NA"
def _merge_metrics(samples):
"""
parse project.yaml file to get metrics for each bam
"""
out_file = os.path.join("metrics", "metrics.tsv")
dt_together = []
cov = {}
with file_transaction(out_file) as out_tx:
for s in samples:
s = s[0]
m = tz.get_in(['summary', 'metrics'], s)
if m:
for me in m:
if isinstance(m[me], list):
m[me] = ":".join(m[me])
dt = pd.DataFrame(m, index=['1'])
dt['avg_coverage_per_region'] = _get_coverage_per_region(s['description'])
cov[s['description']] = dt['avg_coverage_per_region'][0]
# dt = pd.DataFrame.from_dict(m)
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = s['description']
dt_together.append(dt)
if len(dt_together) > 0:
dt_together = utils.rbind(dt_together)
dt_together.to_csv(out_tx, index=False, sep="\t")
for i, s in enumerate(samples):
if s[0]['description'] in cov:
samples[i][0]['summary']['metrics']['avg_coverage_per_region'] = cov[s[0]['description']]
return samples
def _merge_fastqc(data):
"""
merge all fastqc samples into one by module
"""
fastqc_list = defaultdict(list)
for sample in data:
name = dd.get_sample_name(sample[0])
fns = glob.glob(os.path.join(dd.get_work_dir(sample[0]), "qc", dd.get_sample_name(sample[0]), "fastqc") + "/*")
for fn in fns:
if fn.endswith("tsv"):
metric = os.path.basename(fn)
fastqc_list[metric].append([name, fn])
for metric in fastqc_list:
dt_by_sample = []
for fn in fastqc_list[metric]:
dt = pd.read_csv(fn[1], sep="\t")
dt['sample'] = fn[0]
dt_by_sample.append(dt)
dt = utils.rbind(dt_by_sample)
        dt.to_csv(metric, sep="\t", index=False, mode='w')
return [data]
|
guillermo-carrasco/bcbio-nextgen
|
bcbio/pipeline/qcsummary.py
|
Python
|
mit
| 49,963
|
[
"pysam"
] |
dc74a034db9e35ed30d4312f9125bfb02b61899fda9b1a87fa9364551a9a99e9
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
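    # Illustrative easyconfig fragment using these custom parameters
    # (assumed syntax, not taken from a real easyconfig):
    #   hybrid = True           # adds --enable-openmp and the OpenMP DFLAGS
    #   with_scalapack = False  # configures --without-scalapack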
def __init__(self, *args, **kwargs):
"""Add extra config options specific to Quantum ESPRESSO."""
super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.install_subdir = "espresso-%s" % self.version
def patch_step(self):
"""Patch files from build dir (not start dir)."""
super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
        # always include -w to suppress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
val = os.getenv('LIB%s' % lib)
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
self.log.error("Failed to patch %s: %s" % (fn, err))
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
self.log.error("Failed to patch %s: %s" % (fn, err))
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
self.log.error("Found more than one directory with %s prefix, help!" % wantprefix)
if len(wantdirs) != 0:
fn = os.path.join(self.builddir, wantdirs[0], 'conf', 'make.sys.in')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
self.log.error("Failed to patch %s: %s" % (fn, err))
# move non-espresso directories to where they're expected and create symlinks
try:
dirnames = [d for d in os.listdir(self.builddir) if not d.startswith('espresso')]
targetdir = os.path.join(self.builddir, "espresso-%s" % self.version)
for dirname in dirnames:
shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
self.log.info("Moved %s into %s" % (dirname, targetdir))
dirname_head = dirname.split('-')[0]
linkname = None
                if dirname_head == 'sax':
                    linkname = 'SaX'
                elif dirname_head == 'wannier90':
                    linkname = 'W90'
                elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
                    linkname = dirname_head.upper()
linkname = dirname_head.upper()
if linkname:
os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
except OSError, err:
self.log.error("Failed to move non-espresso directories: %s" % err)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for Quantum ESPRESSO."""
# build list of expected binaries based on make targets
bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
if 'cp' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
bins.extend(["cp.x", "cppp.x", "wfdd.x"])
if 'gww' in self.cfg['makeopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
if 'ld1' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
bins.extend(["ld1.x"])
if 'gipaw' in self.cfg['makeopts']:
bins.extend(["gipaw.x"])
if 'neb' in self.cfg['makeopts'] or 'pwall' in self.cfg['makeopts'] or \
'all' in self.cfg['makeopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["neb.x", "path_interpolation.x"])
if 'ph' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
bins.extend(["d3.x", "dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["fqha.x", "q2qstar.x"])
if 'pp' in self.cfg['makeopts'] or 'pwall' in self.cfg['makeopts'] or \
'all' in self.cfg['makeopts']:
bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
"plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
"projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
"wannier_ham.x", "wannier_plot.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["pw2bgw.x", "bgw2pw.x"])
else:
bins.extend(["pw2casino.x"])
if 'pw' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
bins.extend(["band_plot.x", "dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x",
"bands_FS.x", "kvecs_FS.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["generate_vdW_kernel_table.x"])
else:
bins.extend(["path_int.x"])
if 'pwcond' in self.cfg['makeopts'] or 'pwall' in self.cfg['makeopts'] or \
'all' in self.cfg['makeopts']:
bins.extend(["pwcond.x"])
if 'tddfpt' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
upftools = []
if 'upf' in self.cfg['makeopts'] or 'all' in self.cfg['makeopts']:
upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
"oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
"virtual.x"]
if LooseVersion(self.version) > LooseVersion("5"):
upftools.extend(["interpolate.x", "upf2casino.x"])
if 'vdw' in self.cfg['makeopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["vdw.x"])
if 'w90' in self.cfg['makeopts']:
bins.extend(["wannier90.x"])
want_bins = []
if 'want' in self.cfg['makeopts']:
want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
"dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
"wannier.x", "wfk2etsf.x"]
if LooseVersion(self.version) > LooseVersion("5"):
want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
if 'xspectra' in self.cfg['makeopts']:
bins.extend(["xspectra.x"])
yambo_bins = []
if 'yambo' in self.cfg['makeopts']:
yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
pref = self.install_subdir
custom_paths = {
'files': [os.path.join(pref, 'bin', x) for x in bins] +
[os.path.join(pref, 'upftools', x) for x in upftools] +
[os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
[os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
'dirs': [os.path.join(pref, 'include')]
}
super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom path suggestions for Quantum ESPRESSO."""
guesses = super(EB_QuantumESPRESSO, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(self.install_subdir, x) for x in ['bin', 'upftools',
'WANT/bin',
'YAMBO/bin']],
'CPATH': [os.path.join(self.install_subdir, 'include')],
})
return guesses
|
hajgato/easybuild-easyblocks
|
easybuild/easyblocks/q/quantumespresso.py
|
Python
|
gpl-2.0
| 14,480
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90",
"Yambo"
] |
d714b402a609b148ab3f1f4d637da0c8af01d1e63d44d5cbb06b53dbb28825aa
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field ephys_props on 'DataTable'
db.create_table('neuroelectro_datatable_ephys_props', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('datatable', models.ForeignKey(orm['neuroelectro.datatable'], null=False)),
('ephysprop', models.ForeignKey(orm['neuroelectro.ephysprop'], null=False))
))
db.create_unique('neuroelectro_datatable_ephys_props', ['datatable_id', 'ephysprop_id'])
def backwards(self, orm):
# Removing M2M table for field ephys_props on 'DataTable'
db.delete_table('neuroelectro_datatable_ephys_props')
models = {
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'ephys_props': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysProp']", 'null': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.datatabletag': {
'Meta': {'object_name': 'DataTableTag'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronephyslink': {
'Meta': {'object_name': 'NeuronEphysLink'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']"}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_err': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.superprotein': {
'Meta': {'object_name': 'SuperProtein'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['neuroelectro']
|
neuroelectro/neuroelectro_org
|
neuroelectro/south_migrations/0015_auto.py
|
Python
|
gpl-2.0
| 10,875
|
[
"NEURON"
] |
fc3b7ffc79e253cc2803d6c7a49bc96bb7f4cdb6d3fd58ef4bfe545865762891
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
repoze.who plugin to authenticate against the Fedora Account System
.. moduleauthor:: John (J5) Palmieri <johnp@redhat.com>
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. moduleauthor:: Toshio Kuratomi <toshio@fedoraproject.org>
.. versionadded:: 0.3.17
.. versionchanged:: 0.3.26
- Added secure and httponly as optional attributes to the session cookie
- Removed too-aggressive caching (wouldn't detect logout from another app)
- Added ability to authenticate and request a page in one request
'''
import os
import sys
from urllib import quote_plus
import logging
import pkg_resources
from beaker.cache import Cache
from munch import Munch
from kitchen.text.converters import to_bytes, exception_to_bytes
from paste.httpexceptions import HTTPFound
from repoze.who.middleware import PluggableAuthenticationMiddleware
from repoze.who.classifiers import default_request_classifier
from repoze.who.classifiers import default_challenge_decider
from repoze.who.interfaces import IChallenger, IIdentifier
from repoze.who.plugins.basicauth import BasicAuthPlugin
from repoze.who.plugins.friendlyform import FriendlyFormPlugin
from paste.request import parse_dict_querystring, parse_formvars
import webob
from fedora.client import AuthError
from fedora.client.fasproxy import FasProxyClient
from fedora.wsgi.csrf import CSRFMetadataProvider, CSRFProtectionMiddleware
log = logging.getLogger(__name__)
FAS_URL = 'https://admin.fedoraproject.org/accounts/'
FAS_CACHE_TIMEOUT = 900 # 15 minutes (FAS visits timeout after 20)
fas_cache = Cache('fas_repozewho_cache', type='memory')
def fas_request_classifier(environ):
classifier = default_request_classifier(environ)
if classifier == 'browser':
request = webob.Request(environ)
if not request.accept.best_match(
['application/xhtml+xml', 'text/html']):
classifier = 'app'
return classifier
def make_faswho_middleware(
app, log_stream=None,
login_handler='/login_handler',
login_form_url='/login',
logout_handler='/logout_handler',
post_login_url='/post_login', post_logout_url=None, fas_url=FAS_URL,
insecure=False, ssl_cookie=True, httponly=True):
'''
:arg app: WSGI app that is being wrapped
:kwarg log_stream: :class:`logging.Logger` to log auth messages
:kwarg login_handler: URL where the login form is submitted
:kwarg login_form_url: URL where the login form is displayed
:kwarg logout_handler: URL where the logout form is submitted
:kwarg post_login_url: URL to redirect the user to after login
:kwarg post_logout_url: URL to redirect the user to after logout
:kwarg fas_url: Base URL to the FAS server
:kwarg insecure: Allow connecting to a fas server without checking the
server's SSL certificate. Opens you up to MITM attacks but can be
useful when testing. *Do not enable this in production*
:kwarg ssl_cookie: If :data:`True` (default), tell the browser to only
send the session cookie back over https.
:kwarg httponly: If :data:`True` (default), tell the browser that the
session cookie should only be read for sending to a server, not for
access by JavaScript or other clientside technology. This prevents
using the session cookie to pass information to JavaScript clients but
also prevents XSS attacks from stealing the session cookie
information.
'''
# Because of the way we override values (via a dict in AppConfig), we
# need to make this a keyword arg and then check it here to make it act
# like a positional arg.
if not log_stream:
raise TypeError(
'log_stream must be set when calling make_fasauth_middleware()')
faswho = FASWhoPlugin(fas_url, insecure=insecure, ssl_cookie=ssl_cookie,
httponly=httponly)
csrf_mdprovider = CSRFMetadataProvider()
form = FriendlyFormPlugin(login_form_url,
login_handler,
post_login_url,
logout_handler,
post_logout_url,
rememberer_name='fasident',
charset='utf-8')
form.classifications = {IIdentifier: ['browser'],
IChallenger: ['browser']} # only for browser
basicauth = BasicAuthPlugin('repoze.who')
identifiers = [
('form', form),
('fasident', faswho),
('basicauth', basicauth)
]
authenticators = [('fasauth', faswho)]
challengers = [('form', form), ('basicauth', basicauth)]
mdproviders = [('fasmd', faswho), ('csrfmd', csrf_mdprovider)]
if os.environ.get('FAS_WHO_LOG'):
log_stream = sys.stdout
app = CSRFProtectionMiddleware(app)
app = PluggableAuthenticationMiddleware(
app,
identifiers,
authenticators,
challengers,
mdproviders,
fas_request_classifier,
default_challenge_decider,
log_stream=log_stream,
)
return app
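# A minimal usage sketch (illustrative values; note that log_stream is
# required and raises TypeError when omitted):
#   app = make_faswho_middleware(app, log_stream=logging.getLogger('auth'),
#                                fas_url=FAS_URL)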
class FASWhoPlugin(object):
def __init__(self, url, insecure=False, session_cookie='tg-visit',
ssl_cookie=True, httponly=True):
self.url = url
self.insecure = insecure
self.fas = FasProxyClient(url, insecure=insecure)
self.session_cookie = session_cookie
self.ssl_cookie = ssl_cookie
self.httponly = httponly
self._session_cache = {}
self._metadata_plugins = []
for entry in pkg_resources.iter_entry_points(
'fas.repoze.who.metadata_plugins'):
self._metadata_plugins.append(entry.load())
def _retrieve_user_info(self, environ, auth_params=None):
''' Retrieve information from fas and cache the results.
We need to retrieve the user fresh every time because we need to
know that the password hasn't changed or the session_id hasn't
been invalidated by the user logging out.
'''
if not auth_params:
return None
user_data = self.fas.get_user_info(auth_params)
if not user_data:
self.forget(environ, None)
return None
if isinstance(user_data, tuple):
user_data = list(user_data)
# Set session_id in here so it can be found by other plugins
user_data[1]['session_id'] = user_data[0]
        # we don't define permissions since we don't have any per-user data;
        # other services may wish to add another metadata plugin to do so
        if 'permissions' not in user_data[1]:
user_data[1]['permissions'] = set()
# we keep the approved_memberships list because there is also an
# unapproved_membership field. The groups field is for repoze.who
# group checking and may include other types of groups besides
# memberships in the future (such as special fedora community groups)
groups = set()
for g in user_data[1]['approved_memberships']:
groups.add(g['name'])
user_data[1]['groups'] = groups
# If we have information on the user, cache it for later
fas_cache.set_value(user_data[1]['username'], user_data,
expiretime=FAS_CACHE_TIMEOUT)
return user_data
def identify(self, environ):
'''Extract information to identify a user
Retrieve either a username and password or a session_id that can be
passed on to FAS to authenticate the user.
'''
log.info('in identify()')
# friendlyform compat
        if 'repoze.who.logins' not in environ:
environ['repoze.who.logins'] = 0
req = webob.Request(environ, charset='utf-8')
cookie = req.cookies.get(self.session_cookie)
# This is compatible with TG1 and it gives us a way to authenticate
# a user without making two requests
query = req.GET
form = Munch(req.POST)
form.update(query)
if form.get('login', None) == 'Login' and \
'user_name' in form and \
'password' in form:
identity = {
'login': form['user_name'],
'password': form['password']
}
keys = ('login', 'password', 'user_name')
for k in keys:
            if k in req.GET:
                del req.GET[k]
            if k in req.POST:
                del req.POST[k]
return identity
if cookie is None:
return None
log.info('Request identify for cookie %(cookie)s' %
{'cookie': to_bytes(cookie)})
try:
user_data = self._retrieve_user_info(
environ,
auth_params={'session_id': cookie})
except Exception as e: # pylint:disable-msg=W0703
# For any exceptions, returning None means we failed to identify
log.warning(e)
return None
if not user_data:
return None
# Preauthenticated
identity = {'repoze.who.userid': user_data[1]['username'],
'login': user_data[1]['username'],
'password': user_data[1]['password']}
return identity
def remember(self, environ, identity):
log.info('In remember()')
result = []
user_data = fas_cache.get_value(identity['login'])
try:
session_id = user_data[0]
except Exception:
return None
set_cookie = ['%s=%s; Path=/;' % (self.session_cookie, session_id)]
if self.ssl_cookie:
set_cookie.append('Secure')
if self.httponly:
set_cookie.append('HttpOnly')
set_cookie = '; '.join(set_cookie)
result.append(('Set-Cookie', set_cookie))
return result
def forget(self, environ, identity):
log.info('In forget()')
        # return an expiring Set-Cookie header
user_data = fas_cache.get_value(identity['login'])
try:
session_id = user_data[0]
except Exception:
return None
log.info('Forgetting login data for cookie %(s_id)s' %
{'s_id': to_bytes(session_id)})
self.fas.logout(session_id)
result = []
fas_cache.remove_value(key=identity['login'])
expired = '%s=\'\'; Path=/; Expires=Sun, 10-May-1971 11:59:00 GMT'\
% self.session_cookie
result.append(('Set-Cookie', expired))
return result
# IAuthenticatorPlugin
def authenticate(self, environ, identity):
log.info('In authenticate()')
def set_error(msg):
log.info(msg)
err = 1
environ['FAS_AUTH_ERROR'] = err
# HTTPForbidden ?
err_app = HTTPFound(err_goto + '?' +
'came_from=' + quote_plus(came_from))
environ['repoze.who.application'] = err_app
err_goto = '/login'
default_came_from = '/'
if 'SCRIPT_NAME' in environ:
sn = environ['SCRIPT_NAME']
err_goto = sn + err_goto
default_came_from = sn + default_came_from
query = parse_dict_querystring(environ)
form = parse_formvars(environ)
form.update(query)
came_from = form.get('came_from', default_came_from)
try:
auth_params = {'username': identity['login'],
'password': identity['password']}
except KeyError:
try:
auth_params = {'session_id': identity['session_id']}
            except KeyError:
# On error we return None which means that auth failed
set_error('Parameters for authenticating not found')
return None
try:
user_data = self._retrieve_user_info(environ, auth_params)
except AuthError as e:
set_error('Authentication failed: %s' % exception_to_bytes(e))
log.warning(e)
return None
except Exception as e:
set_error('Unknown auth failure: %s' % exception_to_bytes(e))
return None
if user_data:
try:
del user_data[1]['password']
environ['CSRF_AUTH_SESSION_ID'] = user_data[0]
return user_data[1]['username']
except ValueError:
set_error('user information from fas not in expected format!')
return None
except Exception:
pass
set_error('An unknown error happened when trying to log you in.'
' Please try again.')
return None
def add_metadata(self, environ, identity):
log.info('In add_metadata')
if identity.get('error'):
log.info('Error exists in session, no need to set metadata')
return 'error'
plugin_user_info = {}
for plugin in self._metadata_plugins:
plugin(plugin_user_info)
identity.update(plugin_user_info)
del plugin_user_info
user = identity.get('repoze.who.userid')
(session_id, user_info) = fas_cache.get_value(
key=user,
expiretime=FAS_CACHE_TIMEOUT)
#### FIXME: Deprecate this line!!!
# If we make a new version of fas.who middleware, get rid of saving
# user information directly into identity. Instead, save it into
# user, as is done below
identity.update(user_info)
identity['userdata'] = user_info
identity['user'] = Munch()
identity['user'].created = user_info['creation']
identity['user'].display_name = user_info['human_name']
identity['user'].email_address = user_info['email']
identity['user'].groups = user_info['groups']
identity['user'].password = None
identity['user'].permissions = user_info['permissions']
identity['user'].user_id = user_info['id']
identity['user'].user_name = user_info['username']
identity['groups'] = user_info['groups']
identity['permissions'] = user_info['permissions']
if 'repoze.what.credentials' not in environ:
environ['repoze.what.credentials'] = {}
environ['repoze.what.credentials']['groups'] = user_info['groups']
permissions = user_info['permissions']
environ['repoze.what.credentials']['permissions'] = permissions
# Adding the userid:
userid = identity['repoze.who.userid']
environ['repoze.what.credentials']['repoze.what.userid'] = userid
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, id(self))
|
vivekanand1101/python-fedora
|
fedora/wsgi/faswho/faswhoplugin.py
|
Python
|
gpl-2.0
| 15,588
|
[
"VisIt"
] |
d66e1da22d02de10c6235edc25cbc606cb6289039241dfe8e8643867e7b71fbb
|
#!/usr/bin/env python2.7
import os, sys
from numpy import *
import numpy.random
### Runs replica exchange with gREM (fix grem) for an unlimited number of replicas on a set number of processors. This script is inefficient, but necessary when running hundreds of replicas on a relatively small number of procs.
### read number of processors from the command line
nproc = int(sys.argv[1])
### path to simulation directory
path = os.getcwd()
### path to LAMMPS executable
lmp = sys.argv[2]
### LAMMPS input name
inp = sys.argv[3]
### define pressure for simulations (0 if const V)
pressure = 0
### some constants for gREM, must match with LAMMPS input file!
H = -30000
eta = -0.01
#kB = 0.000086173324 # eV (metal)
kB = 0.0019872 # kcal/mol (real)
### define lambdas - script assumes that there are already existing directories with all files necessary to run
lambdas=[400,405,410,415,420,425]
ll = len(lambdas)
### define number of exchanges
starting_ex = int(loadtxt("lastexchange"))
how_many_ex = 5
max_exchange = starting_ex+how_many_ex
### array with walkers
walker = loadtxt("lastwalker")
### initiate array with enthalpies
enthalpy = zeros(ll)
aver_enthalpy = zeros(ll)
for exchange in arange(starting_ex,max_exchange):
print "run", exchange
for l in range(ll):
#print "replica", l
os.chdir(path+"/%s" % lambdas[l])
#os.system("cp restart_file restart_file%d" % exchange)
if (nproc > 1):
os.system("mpirun -np %d " % (nproc) + lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (lambdas[l], eta, H))
if (nproc == 1):
os.system(lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (lambdas[l], eta, H))
os.system("grep -v '[a-zA-Z]' output | awk '{if(NF==6 && NR>19)print $0}' | awk '{print $3}' >ent")
enthalpy[l] = os.popen("tail -n 1 ent").read()
ee = loadtxt("ent")
aver_enthalpy[l] = mean(ee[-1])
# os.system("mv dump.dcd dump%d.dcd" % exchange)
os.system("mv log.lammps log%d.lammps" % exchange)
os.system("mv final_restart_file final_restart_file%d" % exchange)
os.system("mv ent ent%d" % exchange)
os.system("bzip2 log%d.lammps ent%d" % (exchange,exchange))
os.system("cp final_restart_file%d restart_file" % exchange)
    ### replicas will be exchanged based on enthalpy order, not replica order (thermostat order)
#entalpy_sorted_indices = enthalpy.argsort()
aver_entalpy_sorted_indices = aver_enthalpy.argsort()
### choose pair of replicas for exchange attempt based on enthalpy order
pp = random.random_integers(0,ll-2)
first = aver_entalpy_sorted_indices[pp]
second = aver_entalpy_sorted_indices[pp+1]
#if (first>second):
# tmp = first
# first = second
# second = tmp
print "pair1:", first, second
### calculate weights for exchange criterion
w1 = log(lambdas[first]+eta*(enthalpy[first]-1*H))
w2 = log(lambdas[first]+eta*(enthalpy[second]-1*H))
w3 = log(lambdas[second]+eta*(enthalpy[first]-1*H))
w4 = log(lambdas[second]+eta*(enthalpy[second]-1*H))
weight = (w4-w3+w1-w2)/eta/kB
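    ### i.e. weight = [ln(l2+eta*(H2-H)) - ln(l2+eta*(H1-H))
    ###                + ln(l1+eta*(H1-H)) - ln(l1+eta*(H2-H))] / (eta*kB),
    ### with l1/l2 the pair's lambdas and H1/H2 their enthalpies; the swap is
    ### accepted below when weight > 0 or weight > ln(u), u uniform on (0,1)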
    ### generate random number for exchange criterion and calc its log
LOGRANDNUM = log(random.random())
    ### reset the comparison flags
compare1 = 0
compare2 = 0
if (weight>0):
compare1 = 1
if (weight>LOGRANDNUM):
compare2 = 1
### exchange restart files if exchange condition is satisfied
if (compare1>0 or compare2>0):
print "exchange1 accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[first],exchange,path,lambdas[second]))
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[second],exchange,path,lambdas[first]))
### update walkers
tmp1=walker[first]
tmp2=walker[second]
walker[first]=tmp2
walker[second]=tmp1
else:
print "exchange1 not accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
### choose again pair of replicas for exchange attempt based on enthalpy order
### but make sure this pair is different than the first pair
if_different = 0
while if_different<1:
pp2 = random.random_integers(0,ll-2)
third = aver_entalpy_sorted_indices[pp2]
fourth = aver_entalpy_sorted_indices[pp2+1]
if (third!=first and third!=second and third!=aver_entalpy_sorted_indices[pp-1]):
if_different = 1
print "pair2:", third, fourth
### calculate weights for exchange criterion
w1 = log(lambdas[third]+eta*(enthalpy[third]-1*H))
w2 = log(lambdas[third]+eta*(enthalpy[fourth]-1*H))
w3 = log(lambdas[fourth]+eta*(enthalpy[third]-1*H))
w4 = log(lambdas[fourth]+eta*(enthalpy[fourth]-1*H))
weight = (w4-w3+w1-w2)/eta/kB
    ### generate random number for exchange criterion and calc its log
LOGRANDNUM = log(random.random())
    ### reset the comparison flags
compare1 = 0
compare2 = 0
if (weight>0):
compare1 = 1
if (weight>LOGRANDNUM):
compare2 = 1
### exchange restart files if exchange condition is satisfied
if (compare1>0 or compare2>0):
print "exchange2 accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[third],exchange,path,lambdas[fourth]))
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[fourth],exchange,path,lambdas[third]))
### update walkers
tmp1=walker[third]
tmp2=walker[fourth]
walker[third]=tmp2
walker[fourth]=tmp1
else:
print "exchange2 not accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
#print "walkers:", walker
print "".join(["%d " % x for x in walker])
sys.stdout.flush()
lastwalker = open(path + "/lastwalker", "w")
lastwalker.write("".join(["%d " % w for w in walker]))
lastwalker.close()
lastexchange = open(path + "/lastexchange", "w")
lastexchange.write("%d" % (exchange+1))
lastexchange.close()
|
ovilab/atomify
|
libs/lammps/examples/USER/misc/grem/lj-6rep/double-re-short.py
|
Python
|
gpl-3.0
| 6,218
|
[
"LAMMPS"
] |
ef7b6c14848224ca83aea346da8746cb3389a0f48c2a9db8170235966d7ffde9
|
'''
Created on 23/11/2009
@author: brian
'''
from scipysim.actors import Channel, CompositeActor
from scipysim.actors.display.bundlePlotter import BundlePlotter
from scipysim.actors.io import Bundle
from scipysim.actors.signal import Ramp, RandomSource
from scipysim.actors.math import Summer
import logging
logging.basicConfig(level=logging.INFO)
logging.info("Logger enabled")
class NoiseyRamp(CompositeActor):
"""
    This model simulates a ramp source and a random source being added together.
    The signals are in sync - there are NO missing tags.
"""
def __init__(self):
'''Setup the simulation'''
connection1 = Channel()
connection2 = Channel()
connection3 = Channel()
connection4 = Channel()
src1 = Ramp(connection1)
src2 = RandomSource(connection2)
summer = Summer([connection1, connection2], connection3)
bundler = Bundle(connection3, connection4)
dst = BundlePlotter(connection4, title="Scipy-Simulation: Noise + Ramp Sum", show=True)
#dst = Plotter(connection3)
self.components = [src1, src2, summer, bundler, dst]
if __name__ == '__main__':
NoiseyRamp().run()
|
hardbyte/scipy-sim
|
scipysim/models/noisy_ramp_plot.py
|
Python
|
gpl-3.0
| 1,197
|
[
"Brian"
] |
d064641529b9136de3b2db5dfa4ea949e2a2c023f722f06addb35586b6e6efbe
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from third_party.py.concurrent.futures._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from third_party.py.concurrent.futures.process import ProcessPoolExecutor
from third_party.py.concurrent.futures.thread import ThreadPoolExecutor
|
juhalindfors/bazel-patches
|
third_party/py/concurrent/futures/__init__.py
|
Python
|
apache-2.0
| 965
|
[
"Brian"
] |
0e1c9c969ddcc99c5414d881774336f64f82b23c3e8d1c9aaaf69f25a1884b98
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""\
Core Topology object --- :mod:`MDAnalysis.core.topology`
========================================================
.. versionadded:: 0.16.0
:class:`Topology` is the core object that holds all topology information.
TODO: Add in-depth discussion.
Notes
-----
For developers: In MDAnalysis 0.16.0 this new topology system was
introduced and discussed as issue `#363`_; this issue contains key
information and discussions on the new system. The issue number *363*
is also being used as a short-hand in discussions to refer to the new
topology system.
.. _`#363`: https://github.com/MDAnalysis/mdanalysis/issues/363
Classes
-------
.. autoclass:: Topology
:members:
.. autoclass:: TransTable
:members:
Helper functions
----------------
.. autofunction:: make_downshift_arrays
"""
from __future__ import absolute_import
from six.moves import zip
import numpy as np
from .topologyattrs import Atomindices, Resindices, Segindices
from ..exceptions import NoDataError
# TODO Notes:
# Could make downshift tables lazily built! This would
# a) Make these not get built when not used
# b) Optimise moving multiple atoms between residues as only built once
# afterwards
# Could optimise moves by only updating the two parent tables rather than
# rebuilding everything!
def make_downshift_arrays(upshift, nparents):
"""From an upwards translation table, create the opposite direction
Turns a many to one mapping (eg atoms to residues) to a one to many mapping
(residues to atoms)
Parameters
----------
upshift : array_like
Array of integers describing which parent each item belongs to
nparents : integer
Total number of parents that exist.
Returns
-------
downshift : array_like (dtype object)
An array of arrays, each containing the indices of the children
of each parent. Length `nparents` + 1
Examples
--------
To find the residue to atom mappings for a given atom to residue mapping:
>>> atom2res = np.array([0, 1, 0, 2, 2, 0, 2])
    >>> make_downshift_arrays(atom2res, 3)
array([array([0, 2, 5]), array([1]), array([3, 4, 6]), None], dtype=object)
Entry 0 corresponds to residue 0 and says that this contains atoms 0, 2 & 5
Notes
-----
The final entry in the return array will be ``None`` to ensure that the
dtype of the array is :class:`object`.
.. warning:: This means negative indexing should **never**
be used with these arrays.
"""
order = np.argsort(upshift)
upshift_sorted = upshift[order]
borders = [None] + list(np.nonzero(np.diff(upshift_sorted))[0] + 1) + [None]
# returns an array of arrays
downshift = []
counter = -1
# don't use enumerate, we modify counter in place
for x, y in zip(borders[:-1], borders[1:]):
counter += 1
# If parent is skipped, eg (0, 0, 2, 2, etc)
while counter != upshift[order[x:y][0]]:
downshift.append(np.array([], dtype=np.int))
counter += 1
downshift.append(np.sort(np.array(order[x:y], copy=True, dtype=np.int)))
# Add entries for childless parents at end of range
while counter < (nparents - 1):
downshift.append(np.array([], dtype=np.int))
counter += 1
# Add None to end of array to force it to be of type Object
# Without this, a rectangular array gets squashed into a single array
downshift.append(None)
return np.array(downshift, dtype=object)
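# Worked example with a skipped parent (values illustrative):
# make_downshift_arrays(np.array([0, 0, 2, 2]), 3) returns
# [array([0, 1]), array([]), array([2, 3]), None] -- parent 1 has no children,
# so it gets an empty array rather than being omitted.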
class TransTable(object):
"""Membership tables with methods to translate indices across levels.
There are three levels; Atom, Residue and Segment. Each Atom **must**
belong in a Residue, each Residue **must** belong to a Segment.
    When translating upwards, e.g. finding which Segment a Residue belongs in,
    a single numpy array is returned. When translating downwards, two options
    are available: a concatenated result (suffix `_1d`) or a list for each
    parent object (suffix `_2d`).
Parameters
----------
n_atoms, n_residues, n_segments : int
number of atoms, residues, segments in topology
atom_resindex : 1-D array
resindex for each atom in the topology; the number of unique values in
this array must be <= `n_residues`, and the array must be length
`n_atoms`; giving None defaults to placing all atoms in residue 0
residue_segindex : 1-D array
segindex for each residue in the topology; the number of unique values
in this array must be <= `n_segments`, and the array must be length
`n_residues`; giving None defaults to placing all residues in segment 0
Attributes
----------
n_atoms, n_residues, n_segments : int
number of atoms, residues, segments in topology
size
tuple describing the shape of the TransTable
Methods
-------
atoms2residues(aix)
Returns the residue index for many atom indices
residues2atoms_1d(rix)
All atoms in the residues represented by *rix*
residues2atoms_2d(rix)
List of atom indices for each residue in *rix*
residues2segments(rix)
Segment indices for each residue in *rix*
segments2residues_1d(six)
Similar to `residues2atoms_1d`
segments2residues_2d(six)
Similar to `residues2atoms_2d`
atoms2segments(aix)
Segment indices for each atom in *aix*
segments2atoms_1d(six)
Similar to `residues2atoms_1d`
segments2atoms_2d(six)
Similar to `residues2atoms_2d`
"""
def __init__(self,
n_atoms, n_residues, n_segments, # Size of tables
atom_resindex=None, residue_segindex=None, # Contents of tables
):
self.n_atoms = n_atoms
self.n_residues = n_residues
self.n_segments = n_segments
# built atom-to-residue mapping, and vice-versa
if atom_resindex is None:
self._AR = np.zeros(n_atoms, dtype=np.int64)
else:
self._AR = atom_resindex.copy()
if not len(self._AR) == n_atoms:
raise ValueError("atom_resindex must be len n_atoms")
self._RA = make_downshift_arrays(self._AR, n_residues)
# built residue-to-segment mapping, and vice-versa
if residue_segindex is None:
self._RS = np.zeros(n_residues, dtype=np.int64)
else:
self._RS = residue_segindex.copy()
if not len(self._RS) == n_residues:
raise ValueError("residue_segindex must be len n_residues")
self._SR = make_downshift_arrays(self._RS, n_segments)
@property
def size(self):
"""The shape of the table, (n_atoms, n_residues, n_segments)"""
return (self.n_atoms, self.n_residues, self.n_segments)
def atoms2residues(self, aix):
"""Get residue indices for each atom.
Parameters
----------
aix : array
atom indices
Returns
-------
rix : array
residue index for each atom
"""
return self._AR[aix]
def residues2atoms_1d(self, rix):
"""Get atom indices collectively represented by given residue indices.
Parameters
----------
rix : array
residue indices
Returns
-------
aix : array
indices of atoms present in residues, collectively
"""
try:
return np.concatenate([self._RA[r] for r in rix])
except TypeError: # integers aren't iterable, raises TypeError
return self._RA[rix].copy() # don't accidentally send a view!
def residues2atoms_2d(self, rix):
"""Get atom indices represented by each residue index.
Parameters
----------
rix : array
residue indices
Returns
-------
raix : list
each element corresponds to a residue index, in order given in
`rix`, with each element being an array of the atom indices present
in that residue
"""
try:
return [self._RA[r].copy() for r in rix]
except TypeError:
return [self._RA[rix].copy()] # why would this be singular for 2d?
def residues2segments(self, rix):
"""Get segment indices for each residue.
Parameters
----------
rix : array
residue indices
Returns
-------
six : array
segment index for each residue
"""
return self._RS[rix]
def segments2residues_1d(self, six):
"""Get residue indices collectively represented by given segment indices
Parameters
----------
six : array
segment indices
Returns
-------
rix : array
sorted indices of residues present in segments, collectively
"""
try:
return np.concatenate([self._SR[s] for s in six])
except TypeError:
return self._SR[six].copy()
def segments2residues_2d(self, six):
"""Get residue indices represented by each segment index.
Parameters
----------
        six : array
            segment indices
Returns
-------
srix : list
each element corresponds to a segment index, in order given in
`six`, with each element being an array of the residue indices
present in that segment
"""
try:
return [self._SR[s].copy() for s in six]
except TypeError:
return [self._SR[six].copy()]
# Compound moves, does 2 translations
def atoms2segments(self, aix):
"""Get segment indices for each atom.
Parameters
----------
aix : array
atom indices
Returns
-------
        six : array
            segment index for each atom
"""
rix = self.atoms2residues(aix)
return self.residues2segments(rix)
def segments2atoms_1d(self, six):
"""Get atom indices collectively represented by given segment indices.
Parameters
----------
six : array
segment indices
Returns
-------
aix : array
sorted indices of atoms present in segments, collectively
"""
rixs = self.segments2residues_2d(six)
return np.concatenate([self.residues2atoms_1d(rix)
for rix in rixs])
def segments2atoms_2d(self, six):
"""Get atom indices represented by each segment index.
Parameters
----------
        six : array
            segment indices
Returns
-------
saix : list
each element corresponds to a segment index, in order given in
`six`, with each element being an array of the atom indices present
in that segment
"""
# residues in EACH
rixs = self.segments2residues_2d(six)
return [self.residues2atoms_1d(rix) for rix in rixs]
# Move between different groups.
def move_atom(self, aix, rix):
"""Move aix to be in rix"""
self._AR[aix] = rix
self._RA = make_downshift_arrays(self._AR, self.n_residues)
def move_residue(self, rix, six):
"""Move rix to be in six"""
self._RS[rix] = six
self._SR = make_downshift_arrays(self._RS, self.n_segments)
def add_Residue(self, segidx):
# segidx - index of parent
self.n_residues += 1
self._RA = make_downshift_arrays(self._AR, self.n_residues)
self._RS = np.concatenate([self._RS, np.array([segidx])])
self._SR = make_downshift_arrays(self._RS, self.n_segments)
return self.n_residues - 1
def add_Segment(self):
self.n_segments += 1
# self._RS remains the same, no residues point to the new segment yet
self._SR = make_downshift_arrays(self._RS, self.n_segments)
return self.n_segments - 1
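# A minimal TransTable usage sketch (hypothetical indices): four atoms split
# over two residues, both residues in one segment.
#   tt = TransTable(4, 2, 1, atom_resindex=np.array([0, 0, 1, 1]),
#                   residue_segindex=np.array([0, 0]))
#   tt.atoms2residues(np.array([0, 3]))  # -> array([0, 1])
#   tt.residues2atoms_1d([0])            # -> array([0, 1])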
class Topology(object):
"""In-memory, array-based topology database.
    The topology model of MDAnalysis features atoms, which must each be a
    member of one residue. Each residue, in turn, must be a member of one
    segment. The details of maintaining this hierarchy, and mappings of atoms
to residues, residues to segments, and vice-versa, are handled internally
by this object.
Parameters
----------
n_atoms, n_residues, n_segments : int
number of atoms, residues, segments in topology; there must be at least
1 element of each level in the system
attrs : TopologyAttr objects
components of the topology to be included
atom_resindex : array
1-D array giving the resindex of each atom in the system
residue_segindex : array
1-D array giving the segindex of each residue in the system
"""
def __init__(self, n_atoms=1, n_res=1, n_seg=1,
attrs=None,
atom_resindex=None,
residue_segindex=None):
self.tt = TransTable(n_atoms, n_res, n_seg,
atom_resindex=atom_resindex,
residue_segindex=residue_segindex)
if attrs is None:
attrs = []
# add core TopologyAttrs that give access to indices
attrs.extend((Atomindices(), Resindices(), Segindices()))
# attach the TopologyAttrs
self.attrs = []
for topologyattr in attrs:
self.add_TopologyAttr(topologyattr)
@property
def n_atoms(self):
return self.tt.n_atoms
@property
def n_residues(self):
return self.tt.n_residues
@property
def n_segments(self):
return self.tt.n_segments
def add_TopologyAttr(self, topologyattr):
"""Add a new TopologyAttr to the Topology.
Parameters
----------
topologyattr : TopologyAttr
"""
self.attrs.append(topologyattr)
topologyattr.top = self
self.__setattr__(topologyattr.attrname, topologyattr)
@property
def guessed_attributes(self):
"""A list of the guessed attributes in this topology"""
return filter(lambda x: x.is_guessed, self.attrs)
@property
def read_attributes(self):
"""A list of the attributes read from the topology"""
return filter(lambda x: not x.is_guessed, self.attrs)
def add_Residue(self, segment, **new_attrs):
"""
Returns
-------
residx of the new Residue
Raises
------
NoDataError
If not all data was provided. This error is raised before any
"""
# Check that all data is here before making any changes
for attr in self.attrs:
if not attr.per_object == 'residue':
continue
if attr.singular not in new_attrs:
missing = (attr.singular for attr in self.attrs
if (attr.per_object == 'residue' and
attr.singular not in new_attrs))
raise NoDataError("Missing the following attributes for the new"
" Residue: {}".format(', '.join(missing)))
# Resize topology table
residx = self.tt.add_Residue(segment.segindex)
# Add new value to each attribute
for attr in self.attrs:
if not attr.per_object == 'residue':
continue
newval = new_attrs[attr.singular]
attr.values = np.concatenate([attr.values, np.array([newval])])
return residx
def add_Segment(self, **new_attrs):
for attr in self.attrs:
if attr.per_object == 'segment':
if attr.singular not in new_attrs:
missing = (attr.singular for attr in self.attrs
if (attr.per_object == 'segment' and
attr.singular not in new_attrs))
raise NoDataError("Missing the following attributes for the"
" new Segment: {}"
"".format(', '.join(missing)))
segidx = self.tt.add_Segment()
for attr in self.attrs:
if not attr.per_object == 'segment':
continue
newval = new_attrs[attr.singular]
attr.values = np.concatenate([attr.values, np.array([newval])])
return segidx
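# Illustrative usage sketch (not part of the original module): a hypothetical
# session showing how Topology and its TransTable fit together, assuming the
# TopologyAttr classes referenced above (Atomindices etc.) are available.
#
#     top = Topology(n_atoms=3, n_res=2, n_seg=1,
#                    atom_resindex=np.array([0, 0, 1]),
#                    residue_segindex=np.array([0, 0]))
#     top.n_atoms, top.n_residues, top.n_segments  # -> (3, 2, 1)
#     top.tt.move_atom(2, 0)          # atom 2 now belongs to residue 0
#     new_seg = top.tt.add_Segment()  # index of the freshly added segment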
|
kain88-de/mdanalysis
|
package/MDAnalysis/core/topology.py
|
Python
|
gpl-2.0
| 17,617
|
[
"MDAnalysis"
] |
7f18398d5d16d35c8dbaf7bc859bb712f12a6280f4afe573255341b77b4a917b
|
import math
import collections
class Database(object):
# single points defining the 'centre' of an object or room that nodes will travel to
POINTS = {
'cupboard': (-3.6, 4.0),
'bedroom': (-3.6, 2.1),
'bathroom': (-3.6, -3.7),
'hallway_top': (-1.5, 1.9),
'hallway_mid': (-1.5, 0.45),
'hallway_bot': (-1.5, -3.7),
'door': (-1.2, -6),
'kitchen': (2.05, 3),
'living_room_middle': (2.05, 0.45),
'living_room_entrance': (1.5, 0.45),
'living_room_sofa': (1, -3),
# robot starting positions
'cook_idle': (0, 3),
'medication_idle': (-4.2, 4.4),
'entertainment_idle': (4.5, -4.5),
'companionship_idle': (-0.2, -10),
# human starting positions
'visitor_idle': (-1.2, -10),
'relative_idle': (-1.9, -13),
'nurse_idle': (6, -6),
'doctor_idle': (6, -7.5),
'caregiver_idle': (-6, -7.5),
'friend_idle' : (-6, -9),
'idle': (12, 4),
'bed': (-2.8, -1.1),
'kitchen_stove': (2.05, 3.5),
'sofa': (0.3, -3),
'gym': (-4.450, 2.850),
'sofa2': (1.500, -4.250),
'toilet': (-4.350, -4.500),
'sink': (-3.300, -3.400),
'bathtub': (-4.350, -3.250),
'fridge': (0.500, 3.000),
'dishwasher': (3.750, 3.500),
'piano': (0.8, -1.1)
}
# objects (mostly rooms) defined by their top left and bottom right points
OBJECTS = collections.OrderedDict([
('cupboard', ((-5, 5), (-2.2, 3.6))),
('bedroom', ((-5, 3.6), (-2.2, -2.3))),
('bathroom', ((-5, -2.3), (-2.2, -5))),
('hallway_mid', ((-2.2, 5), (-0.7, -5))),
('kitchen', ((-0.7, 5), (5, 2.2))),
('living_room_middle', ((-0.7, 2.2), (5, -5))),
('house', ((-5, 5), (5, -5))),
('visitor_idle', ((-4, -8), (-1, -11))),
('relative_idle', ((-3, -12), (-1, -14))),
('caregiver_idle', ((-8, -6), (-4, -8))),
('doctor_idle', ((5, -6), (7, -7))),
('nurse_idle', ((5, -7), (8, -7))),
('friend_idle', ((-8, -8), (-4, -11)))
])
    # connected graph of points showing neighbours
GRAPH = {
# Invisible nodes/points within our house
'cupboard': ['bedroom','medication_idle'],
'bedroom': ['bed','cupboard','gym','hallway_top','companionship_idle'],
'bathroom': ['bathtub','hallway_bot','sink','toilet'],
'hallway_top': ['bedroom','hallway_mid'],
'hallway_mid': ['hallway_bot','hallway_top','living_room_entrance'],
'hallway_bot': ['bathroom','door','hallway_mid'],
'door': ['hallway_bot','visitor_idle','caregiver_idle','nurse_idle','doctor_idle', 'relative_idle', 'friend_idle'],
'kitchen': ['dishwasher','fridge','living_room_middle','cook_idle','kitchen_stove'],
'living_room_middle': ['kitchen','living_room_entrance','living_room_sofa', 'piano'],
'living_room_entrance': ['hallway_mid','living_room_sofa','living_room_middle'],
'living_room_sofa': ['sofa','sofa2','piano','entertainment_idle','living_room_entrance','living_room_middle'],
# Robot idle positions
'cook_idle': ['kitchen'],
'medication_idle': ['cupboard'],
'entertainment_idle': ['living_room_middle'],
'companionship_idle': ['bedroom'],
# Human starting positions (excluding resident)
'visitor_idle': ['door'],
'nurse_idle': ['door'],
'doctor_idle': ['door'],
'caregiver_idle': ['door'],
'relative_idle': ['door'],
'friend_idle' : ['door'],
# Furniture
'bed': ['bedroom'],
'kitchen_stove': ['kitchen'],
'sofa': ['living_room_sofa'],
'gym': ['bedroom'],
'sofa2': ['living_room_sofa'],
'toilet': ['bathroom'],
'sink': ['bathroom'],
'bathtub': ['bathroom'],
'fridge': ['kitchen'],
'dishwasher': ['kitchen'],
'piano': ['living_room_sofa']
}
    # mapping each robot's name to its idle position (e.g. robot_2 is the cook robot)
ROBOT_IDLES = {
'robot_0' : 'visitor_idle', #visitor
'robot_2' : 'cook_idle',
'robot_3' : 'nurse_idle',
'robot_4' : 'doctor_idle',
'robot_5' : 'entertainment_idle',
'robot_6' : 'companionship_idle',
'robot_7' : 'caregiver_idle',
'robot_8' : 'relative_idle',
'robot_9' : 'friend_idle',
'robot_10' : 'medication_idle'
}
# scheduled events, their priorities and warning messages
EVENTS = {
'Resident.wakeup': {
'explanation': 'Resident is currently waking up',
'priority': 3,
'destination': 'sofa',
'duration': 100
},
'Resident.sleep': {
'explanation': 'Resident is going to sleep',
'priority': 3,
'destination': 'bed',
'duration': 100
},
'Resident.eat_breakfast': {
'explanation': 'Resident is eating breakfast',
'priority': 3,
'destination': 'kitchen',
'duration': 100
},
'Resident.eat_lunch': {
'explanation': 'Resident is eating lunch',
'priority': 3,
'destination': 'kitchen',
'duration': 100
},
'Resident.eat_dinner': {
'explanation': 'Resident is eating dinner',
'priority': 3,
'destination': 'kitchen',
'duration': 100
},
'Resident.eat_snack': { # generated event for innovation
'explanation': 'Resident is eating a snack',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Resident.take_meds': {
'explanation': 'Resident is taking medication',
'priority': 1,
'destination': 'cupboard',
'duration': 100
},
'Resident.idle': {
'explanation': 'Resident is not doing anything',
'priority': 3,
'destination': 'sofa',
'duration': 100
},
'Resident.gym': {
'explanation': 'Resident is exercising',
'priority': 1,
'destination': 'gym',
'duration': 100
},
'Resident.toilet': {
            'explanation': 'Resident is going to the toilet',
'priority': 1,
'destination': 'toilet',
'duration': 100
},
'Resident.bath': {
'explanation': 'Resident is having a bath',
'priority': 1,
'destination': 'bathtub',
'duration': 100
},
'Resident.heart_attack': {
            'explanation': 'Resident is having a heart attack',
'priority': 0,
'destination': 'bed',
'duration': 200
},
'Cook.cook_breakfast': {
'explanation': 'Cook robot is cooking breakfast',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Cook.cook_lunch': {
'explanation': 'Cook robot is cooking lunch',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Cook.cook_dinner': {
'explanation': 'Cook robot is cooking dinner',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Visitor.visit': {
'explanation': 'Someone is visiting the house',
'priority': 1,
'destination': 'living_room_middle',
'duration': 5
},
'Resident.status_eat_med': {
'explanation': 'Resident is eating',
'priority': 3,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_eat_low': {
'explanation': 'Resident is eating',
'priority': 2,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_eat_dan': {
'explanation': 'Resident is eating',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_med_med': {
'explanation': 'Resident is taking medication',
'priority': 3,
'destination': 'cupboard',
'duration': 100
},
'Resident.status_med_low': {
'explanation': 'Resident is taking medication',
'priority': 2,
'destination': 'cupboard',
'duration': 100
},
'Resident.status_med_dan': {
'explanation': 'Resident is taking medication',
'priority': 1,
'destination': 'cupboard',
'duration': 100
},
'Resident.status_ent_med': {
'explanation': 'Resident is entertaining themselves',
'priority': 3,
'destination': 'sofa',
'duration': 100
},
'Resident.status_ent_low': {
'explanation': 'Resident is entertaining themselves',
'priority': 2,
'destination': 'sofa',
'duration': 100
},
'Resident.status_ent_dan': {
'explanation': 'Resident is entertaining themselves',
'priority': 1,
'destination': 'sofa',
'duration': 100
},
'Resident.status_san_med': {
'explanation': 'Resident is going crazy with all these robots',
'priority': 3,
'destination': 'sofa',
'duration': 100
},
'Resident.status_san_low': {
'explanation': 'Resident is going crazy with all these robots',
'priority': 2,
'destination': 'sofa',
'duration': 100
},
'Resident.status_san_dan': {
'explanation': 'Resident is going crazy with all these robots',
'priority': 1,
'destination': 'sofa',
'duration': 100
},
'Resident.status_fit_med': {
'explanation': 'Resident is exercising',
'priority': 3,
'destination': 'gym',
'duration': 100
},
'Resident.status_fit_low': {
'explanation': 'Resident is exercising',
'priority': 2,
'destination': 'gym',
'duration': 100
},
'Resident.status_fit_dan': {
'explanation': 'Resident is exercising',
'priority': 1,
'destination': 'gym',
'duration': 100
},
'Resident.status_hyd_med': {
'explanation': 'Resident is hydrating',
'priority': 3,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_hyd_low': {
'explanation': 'Resident is hydrating',
'priority': 2,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_hyd_dan': {
'explanation': 'Resident is hydrating',
'priority': 1,
'destination': 'kitchen',
'duration': 100
},
'Resident.status_hyg_med': {
'explanation': 'Resident is bathing',
'priority': 3,
'destination': 'bathroom',
'duration': 100
},
'Resident.status_hyg_low': {
'explanation': 'Resident is bathing',
'priority': 2,
'destination': 'bathroom',
'duration': 100
},
'Resident.status_hyg_dan': {
'explanation': 'Resident is bathing',
'priority': 1,
'destination': 'bathroom',
'duration': 100
},
'Resident.status_rel_med': {
'explanation': 'Resident is peeing',
'priority': 3,
'destination': 'bathroom',
'duration': 100
},
'Resident.status_rel_low': {
'explanation': 'Resident is peeing',
'priority': 2,
'destination': 'bathroom',
'duration': 100
},
'Resident.status_rel_dan': {
'explanation': 'Resident is peeing',
'priority': 1,
'destination': 'bathroom',
'duration': 100
},
'Entertain.entertain_play_piano': {
'explanation': 'Entertainment robot is playing the piano',
'priority': 2,
'destination': 'piano',
'duration': 100
}
}
LEVELS = (
'Fullness', 'Health', 'Entertainment', 'Sanity',
'Fitness', 'Hydration', 'Hygiene', 'Relief'
)
# tasks run by the scheduler
SCHEDULED_TASKS = {
8: 'Resident.wakeup',
60: 'Entertain.entertain_play_piano',
23: 'Visitor.visit',
30: 'Cook.cook_breakfast',
45: 'Resident.eat_breakfast',
55: 'Resident.take_meds',
70: 'Cook.cook_lunch',
85: 'Resident.eat_lunch',
100: 'Resident.idle',
120: 'Visitor.visit',
150: 'Cook.cook_dinner',
170: 'Resident.eat_dinner',
200: 'Resident.sleep'
}
# resident status requirements
STATUS_TASKS = {
"Fullness: med": "Resident.status_eat_med",
"Fullness: low": "Resident.status_eat_low",
"Fullness: dan": "Resident.status_eat_dan",
"Health: med": "Resident.status_med_med",
"Health: low": "Resident.status_med_low",
"Health: dan": "Resident.status_med_dan",
"Entertainment: med": "Resident.status_ent_med",
"Entertainment: low": "Resident.status_ent_low",
"Entertainment: dan": "Resident.status_ent_dan",
"Sanity: med": "Resident.status_san_med",
"Sanity: low": "Resident.status_san_low",
"Sanity: dan": "Resident.status_san_dan",
"Fitness: med": "Resident.status_fit_med",
"Fitness: low": "Resident.status_fit_low",
"Fitness: dan": "Resident.status_fit_dan",
"Hydration: med": "Resident.status_hyd_med",
"Hydration: low": "Resident.status_hyd_low",
"Hydration: dan": "Resident.status_hyd_dan",
"Hygiene: med": "Resident.status_hyg_med",
"Hygiene: low": "Resident.status_hyg_low",
"Hygiene: dan": "Resident.status_hyg_dan",
"Relief: med": "Resident.status_rel_med",
"Relief: low": "Resident.status_rel_low",
"Relief: dan": "Resident.status_rel_dan"
}
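# Illustrative helper (not part of the original file): a minimal breadth-first
# search over Database.GRAPH, assuming movement is only allowed along the
# neighbour lists above. Sketch only; the simulator's own routing may differ.
def find_path(start, goal):
    """Return the list of node names on a shortest path, or None."""
    queue = collections.deque([[start]])
    visited = {start}
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbour in Database.GRAPH.get(node, []):
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(path + [neighbour])
    return None
# Example: find_path('door', 'bathroom') -> ['door', 'hallway_bot', 'bathroom']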
|
swanndri/ROS-Healthcare-Simulator
|
se306/src/package1/scripts/database.py
|
Python
|
mit
| 11,787
|
[
"VisIt"
] |
917ee44ec1e845b4b96474a90914c9be8cf3b32fadc6556f801f656f9f493902
|
"""
Joint IDL semantic graph
"""
from collections import deque
import binascii
import copy
import itertools
import os
from .IdlParser import IdlParser
MYPY = False
if MYPY:
import typing
from typing import cast
else:
def cast(type_, obj):
return obj
if not hasattr(__builtins__, 'xrange'):
xrange = range
class TypeBase(object):
# pylint: disable=too-many-arguments
def __init__(
self,
name, # type: str
package_name_list, # type: typing.List[str]
location, # type: typing.Optional[dict]
variant_name, # type: str
index, # type: int
need_release # type: bool
): # type: (...) -> None
self.name = name
self.package_name_list = package_name_list
self.location = location
self.variant_name = variant_name
self.index = index
self.fullname = '.'.join(package_name_list + [name])
self.need_release = need_release
def __repr__(self): # type: () -> str
return self.fullname
class SemanticGraphException(Exception):
def __init__(self, location, message): # type: (dict, str) -> None
super(SemanticGraphException, self).__init__(message)
self.location = location
self.message = message
class EnumValue(object):
def __init__(self, name, value, location): # type: (str, int, dict) -> None
self.name = name
self.value = value
self.location = location
class Enum(TypeBase):
def __init__(self, name, package_name_list, location): # type: (str, typing.List[str], dict) -> None
super(Enum, self).__init__(name, package_name_list, location, variant_name='e', index=14, need_release=False)
self.values = [] # type: typing.List[EnumValue]
class StructMember(object):
def __init__(self, name, type_, location): # type: (str, TypeBase, dict) -> None
self.name = name
self.type = type_
self.location = location
class Struct(TypeBase):
def __init__(self, name, package_name_list, location): # type: (str, typing.List[str], dict) -> None
super(Struct, self).__init__(name, package_name_list, location, variant_name='members', index=15, need_release=True)
self.members = [] # type: typing.List[StructMember]
class Array(TypeBase):
def __init__(self, element_type): # type: (TypeBase) -> None
super(Array, self).__init__(
'{}[]'.format(element_type.name),
element_type.package_name_list,
element_type.location,
variant_name='array',
index=17,
need_release=True
)
self.element_type = element_type
class Parameter(object):
def __init__(self, index, name, type_, location): # type: (int, str, TypeBase, dict) -> None
self.index = index
self.name = name
self.type = type_
self.location = location
class Method(object):
def __init__(self, index, name, ret_type, location): # type: (int, str, TypeBase, dict) -> None
self.index = index
self.name = name
self.ret_type = ret_type
self.params = [] # type: typing.List[Parameter]
self.inherited = False
self.location = location
def copy_from_base(self, new_index):
result = copy.copy(self)
result.index = new_index
result.inherited = True
return result
class Interface(TypeBase):
def __init__(self, name, package_name_list, location): # type: (str, typing.List[str], dict) -> None
super(Interface, self).__init__(name, package_name_list, location, variant_name='obj', index=16, need_release=True)
self.methods = [] # type: typing.List[Method]
self.bases = [] # type: typing.List[Interface]
def calculate_checksum(self): # type: () -> None
ifc_str = self._ifc_str()
self.checksum = int(binascii.crc32(ifc_str.encode('utf-8'))) % (1 << 32) # pylint: disable=attribute-defined-outside-init
def _ifc_str(self): # type: () -> str
# pylint: disable=protected-access
return '{}({}){{{}}}'.format(self.fullname, ','.join('{}'.format(b._ifc_str()) for b in self.bases), ','.join(self._method_str(m) for m in self.methods))
def _method_str(self, m): # type: (Method) -> str
return '{} {}({})'.format(self._type_str(m.ret_type), m.name, ','.join(self._param_str(p) for p in m.params))
def _param_str(self, p): # type: (Parameter) -> str
return '{}'.format(self._type_str(p.type))
def _type_str(self, t): # type: (TypeBase) -> str
if isinstance(t, (BuiltinType, Interface)):
return t.fullname
elif isinstance(t, Enum):
return '{}{{{}}}'.format(t.fullname, ','.join('{}:{}'.format(v.name, v.value) for v in t.values))
elif isinstance(t, Struct):
return '{}{{{}}}'.format(t.fullname, ','.join('{} {}'.format(self._type_str(m.type), m.name) for m in t.members))
elif isinstance(t, Array):
return '{}[]'.format(self._type_str(t.element_type))
else:
raise RuntimeError('Not implemented (type: {})!'.format(t))
class Package(object):
def __init__(self, name_list): # type: (typing.List[str]) -> None
self.name_list = name_list
self.fullname = '.'.join(name_list)
self.interfaces = [] # type: typing.List[Interface]
self.enums = [] # type: typing.List[Enum]
self.structs = [] # type: typing.List[Struct]
def __repr__(self): # type: () -> str
return '.'.join(self.name_list)
def find_type(self, name): # type: (str) -> TypeBase
result = next((t for t in self.interfaces if t.name == name), None) # type: typing.Optional[TypeBase]
if not result:
result = next((t for t in self.enums if t.name == name), None)
if not result:
result = next((t for t in self.structs if t.name == name), None)
if not result:
raise LookupError('Type {}.{} was not declared!'.format('.'.join(self.name_list), name))
return result
class BuiltinTypeCategory(object):
class Value(object):
def __init__(self, need_release): # type: (bool) -> None
self.need_release = need_release
void = Value(False)
int = Value(False)
bool = Value(False)
float = Value(False)
string = Value(True)
# pylint: disable=too-many-instance-attributes
class BuiltinType(TypeBase):
# pylint: disable=too-many-arguments
def __init__(
self,
name, # type: str
variant_name, # type: str
index, # type: int
category, # type: BuiltinTypeCategory.Value
bits=0, # type: int
signed=False # type: bool
): # type: (...) -> None
super(BuiltinType, self).__init__(name, [], location=None, variant_name=variant_name, index=index, need_release=category.need_release)
self.name = name
self.fullname = name
self.variant_name = variant_name
self.index = index
self.category = category
self.need_release = category.need_release
self.signed = signed
self.bits = bits
class SemanticGraph(object):
def __init__(self): # type: () -> None
self.packages = [] # type: typing.List[Package]
self.flat_interfaces = [] # type: typing.List[Interface]
self.builtin_types = {
'void': BuiltinType('void', 'void', 1, BuiltinTypeCategory.void),
'bool': BuiltinType('bool', 'b', 2, BuiltinTypeCategory.bool),
'i8': BuiltinType('i8', 'i8', 3, BuiltinTypeCategory.int, 8, True),
'u8': BuiltinType('u8', 'u8', 4, BuiltinTypeCategory.int, 8, False),
'i16': BuiltinType('i16', 'i16', 5, BuiltinTypeCategory.int, 16, True),
'u16': BuiltinType('u16', 'u16', 6, BuiltinTypeCategory.int, 16, False),
'i32': BuiltinType('i32', 'i32', 7, BuiltinTypeCategory.int, 32, True),
'u32': BuiltinType('u32', 'u32', 8, BuiltinTypeCategory.int, 32, False),
'i64': BuiltinType('i64', 'i64', 9, BuiltinTypeCategory.int, 64, True),
'u64': BuiltinType('u64', 'u64', 10, BuiltinTypeCategory.int, 64, False),
'f32': BuiltinType('f32', 'f32', 11, BuiltinTypeCategory.float, 32),
'f64': BuiltinType('f64', 'f64', 12, BuiltinTypeCategory.float, 64),
'string': BuiltinType('string', 'utf8', 13, BuiltinTypeCategory.string)
} # type: typing.Dict[str, BuiltinType]
def find_package(self, name_list): # type: (typing.List[str]) -> Package
result = next((p for p in self.packages if p.name_list == name_list), None)
if not result:
raise LookupError('Package {} was not declared!'.format('.'.join(name_list)))
return result
def find_type(self, package_name_list, type_name): # type: (typing.List[str], str) -> TypeBase
return self.find_package(package_name_list).find_type(type_name)
def make_type(self, current_package, type_entry): # type: (Package, dict) -> TypeBase
result = self._make_decayed_type(current_package, type_entry)
if 'array' in type_entry:
for _ in type_entry['array']:
result = Array(result)
return result
def _make_decayed_type(self, current_package, type_entry): # type: (Package, dict) -> TypeBase
type_list = type_entry['name']
try:
return self.builtin_types['.'.join(type_list)]
except KeyError:
pkg = type_list[:-1]
type_name = type_list[-1]
for x in xrange(len(current_package.name_list), -1, -1):
pkg_to_check = current_package.name_list[:x] + pkg
try:
p = self.find_package(pkg_to_check)
return p.find_type(type_name)
except LookupError:
continue
raise SemanticGraphException(type_entry['location'], 'Unknown type: {}'.format('.'.join(type_list)))
class SemanticGraphBuilder(object):
def __init__(self, import_directories): # type: (typing.Sequence[str]) -> None
self._import_directories = import_directories
self._idl_parser = IdlParser()
self._predefined_imports = ['joint/IObject.idl']
# pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches
def build(self, filenames): # type: (typing.List[str]) -> SemanticGraph
semantic_graph = SemanticGraph()
files = self._get_files(filenames)
parsed_files = []
for f in files:
parsed_files.append(self._idl_parser.parse_file(f))
for ast in parsed_files:
package_name_list = list(ast['package'])
try:
pkg = semantic_graph.find_package(package_name_list)
except LookupError:
pkg = Package(package_name_list)
semantic_graph.packages.append(pkg)
for t_ast in ast['types']:
if t_ast['kind'] == 'interface':
ifc = Interface(t_ast['name'], pkg.name_list, t_ast['location'])
pkg.interfaces.append(ifc)
elif t_ast['kind'] == 'enum':
e = Enum(t_ast['name'], pkg.name_list, t_ast['location'])
pkg.enums.append(e)
elif t_ast['kind'] == 'struct':
s = Struct(t_ast['name'], pkg.name_list, t_ast['location'])
pkg.structs.append(s)
for ast in parsed_files:
package_name_list = list(ast['package'])
pkg = semantic_graph.find_package(package_name_list)
for t_ast in ast['types']:
if t_ast['kind'] == 'interface':
ifc = cast(Interface, pkg.find_type(t_ast['name']))
if 'bases' in t_ast:
for b_ast in t_ast['bases']:
base = semantic_graph.make_type(pkg, b_ast)
if not isinstance(base, Interface):
raise SemanticGraphException(b_ast['location'], '{} is not an interface'.format(base.name))
ifc.bases.append(base)
elif ifc.fullname != 'joint.IObject':
iobject_ifc = cast(Interface, semantic_graph.find_type(['joint'], 'IObject')) # type: Interface
ifc.bases.append(iobject_ifc)
self._add_base_methods(ifc, ifc.bases, set())
for m_ast in t_ast['methods']:
method = Method(len(ifc.methods), m_ast['name'], semantic_graph.make_type(pkg, m_ast['ret_type']), m_ast['location'])
p_index = 0
for p_ast in m_ast['params']:
p = Parameter(p_index, p_ast['name'], semantic_graph.make_type(pkg, p_ast['type']), p_ast['location'])
p_index += 1
method.params.append(p)
ifc.methods.append(method)
elif t_ast['kind'] == 'enum':
e = cast(Enum, pkg.find_type(t_ast['name']))
value = 0
if not t_ast['values']:
raise SemanticGraphException(t_ast['location'], 'Empty enum type: {}'.format(e.fullname))
for v_ast in t_ast['values']:
v = EnumValue(v_ast['name'], v_ast['value'] if 'value' in v_ast else value, v_ast['location'])
value = v.value + 1
e.values.append(v)
elif t_ast['kind'] == 'struct':
s = cast(Struct, pkg.find_type(t_ast['name']))
for m_ast in t_ast['members']:
member = StructMember(m_ast['name'], semantic_graph.make_type(pkg, m_ast['type']), m_ast['location'])
s.members.append(member)
for pkg in semantic_graph.packages:
for ifc in pkg.interfaces:
ifc.calculate_checksum()
semantic_graph.flat_interfaces = self._topologically_sort_interfaces(
list(itertools.chain.from_iterable(pkg.interfaces for pkg in semantic_graph.packages))
)
return semantic_graph
def _topologically_sort_interfaces(self, interfaces):
new_interfaces = {ifc.fullname: ifc for ifc in interfaces}
result = [] # type: typing.List[Interface]
def visit(ifc):
if ifc.fullname in new_interfaces:
new_interfaces.pop(ifc.fullname)
for b in ifc.bases:
visit(b)
result.append(ifc)
while new_interfaces:
ifc_name = next(iter(new_interfaces))
visit(new_interfaces[ifc_name])
return result
def _add_base_methods(self, ifc, bases, visited_interfaces): # type: (Interface, typing.Sequence[Interface], typing.Set[str]) -> None
for b in bases:
if b.fullname in visited_interfaces:
continue
visited_interfaces.add(b.fullname)
self._add_base_methods(ifc, b.bases, visited_interfaces)
for m in b.methods:
if not m.inherited:
ifc.methods.append(m.copy_from_base(len(ifc.methods)))
def _get_dependencies(self, idl_file_path): # type: (str) -> typing.Iterable[str]
ast = self._idl_parser.parse_file(idl_file_path)
if 'imports' in ast:
for import_ast_entry in ast['imports']:
yield self._resolve_import(import_ast_entry)
def _resolve_import(self, import_ast_entry): # type: (dict) -> str
path = import_ast_entry['path']
for import_dir in [''] + ['{}/'.format(d) for d in self._import_directories]:
idl_file = '{}{}'.format(import_dir, path)
if os.path.isfile(idl_file):
return idl_file
raise SemanticGraphException(import_ast_entry['location'], 'Cannot find idl file: {}'.format(path))
def _get_files(self, filenames): # type: (typing.List[str]) -> typing.List[str]
idl_files = [] # type: typing.List[str]
visited_filenames = set() # type: typing.Set[str]
predefined_import_paths = [self._resolve_import({'path': p, 'location': None}) for p in self._predefined_imports]
input_filenames = deque(predefined_import_paths + filenames)
while input_filenames:
filename = input_filenames.popleft()
if filename in visited_filenames:
continue
idl_files.append(filename)
visited_filenames.add(filename)
for dependency in self._get_dependencies(filename):
input_filenames.append(dependency)
return idl_files
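# Illustrative sketch (not part of the original module): building a tiny
# interface by hand and computing its checksum, using only the classes above.
#
#     sg = SemanticGraph()
#     ifc = Interface('ICalc', ['test'], location=None)
#     m = Method(0, 'Add', sg.builtin_types['i32'], location=None)
#     m.params.append(Parameter(0, 'l', sg.builtin_types['i32'], location=None))
#     m.params.append(Parameter(1, 'r', sg.builtin_types['i32'], location=None))
#     ifc.methods.append(m)
#     ifc.calculate_checksum()  # CRC32 of 'test.ICalc(){i32 Add(i32,i32)}'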
|
koplyarov/joint
|
joint-gen/joint/SemanticGraph.py
|
Python
|
isc
| 16,894
|
[
"VisIt"
] |
7934e0f2ceb3d641a6ddc8174cb361d9d7e45cfb2c036822fe1f92ccccc285c8
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
""" Decorator that caches the value gradient of function each time it
is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.alltrue(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
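# Illustrative sketch (not in the original source): MemoizeJac turns a single
# function returning (value, gradient) into separate fun/jac callables, e.g.
#
#     fg = lambda x: (rosen(x), rosen_der(x))  # hypothetical combined form
#     memo = MemoizeJac(fg)
#     memo(x0)              # evaluates fg once, returns the value
#     memo.derivative(x0)   # returns the gradient cached for the same x0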
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending of the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(numpy.abs(x))
elif ord == -Inf:
return numpy.amin(numpy.abs(x))
else:
return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
        sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
"""
x = atleast_1d(x)
H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
400 * x[1:-1] * p[2:])
Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
return Hp
def wrap_function(function, args):
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(*wrapper_args):
ncalls[0] += 1
return function(*(wrapper_args + args))
return ncalls, function_wrapper
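# Illustrative note (not in the original source): wrap_function attaches a
# mutable call counter to func, which is how the optimizers below report nfev.
#
#     ncalls, f = wrap_function(rosen, ())
#     f(np.zeros(3))
#     ncalls[0]  # -> 1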
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e. ``f(x,*args)``.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
xtol : float, optional
Relative error in xopt acceptable for convergence.
ftol : number, optional
Relative error in func(xopt) acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
N = len(x0)
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
one2np1 = list(range(1, N + 1))
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
fsim = numpy.zeros((N + 1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
fsim[0] = func(x0)
nonzdelt = 0.05
zdelt = 0.00025
for k in range(0, N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
f = func(y)
fsim[k + 1] = f
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print('Warning: ' + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print('Warning: ' + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
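# Illustrative sketch (not in the original source): minimizing the Rosenbrock
# function defined above with the derivative-free simplex method.
#
#     >>> x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
#     >>> xopt = fmin(rosen, x0, xtol=1e-8, disp=False)
#     >>> np.allclose(xopt, 1.0, atol=1e-4)
#     True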
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : array_like
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`.
\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
        The partial derivatives of `f` with respect to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.
Returns
-------
err : float
The square root of the sum of squares (i.e. the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
"""
step = kwargs.pop('epsilon', _epsilon)
if kwargs:
raise ValueError("Unknown keyword arguments: %r" %
(list(kwargs.keys()),))
return sqrt(sum((grad(x0, *args) -
approx_fprime(x0, func, step, *args))**2))
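# Forward-difference approximation of the Hessian-vector product: for a small
# epsilon, (fprime(x0 + epsilon*p) - fprime(x0)) / epsilon approximates
# the Hessian of f at x0 multiplied by the vector p.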
def approx_fhess_p(x0, p, fprime, epsilon, *args):
f2 = fprime(*((x0 + epsilon*p,) + args))
f1 = fprime(*((x0,) + args))
return (f2 - f1) / epsilon
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if a
    suitable step length is not found, and raise an exception if neither
    search finds one.
Raises
------
_LineSearchError
If no suitable step size is found
"""
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is None:
# line search failed: try different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
if ret[0] is None:
raise _LineSearchError()
return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
        If True, return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e. the inverse hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : list
`OptimizeResult` at each iteration. Only returned if retall is True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = numpy.eye(N, dtype=int)
Hk = I
# Sets the initial step guess to dx ~ 1
old_fval = f(x0)
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
xk = x0
if retall:
allvecs = [x0]
sk = [2 * gtol]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -numpy.dot(Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not numpy.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
        try:  # this was handled in numeric; keep it here for extra safety
rhok = 1.0 / (numpy.dot(yk, sk))
except ZeroDivisionError:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is a patch for numpy
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
sk[numpy.newaxis, :])
fval = old_fval
if np.isnan(fval):
# This can happen if the first call to f returned NaN;
# the loop is then never entered.
warnflag = 2
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
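# Illustrative sketch (not in the original source): BFGS with an analytic
# gradient, reusing rosen and rosen_der from above.
#
#     >>> x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
#     >>> xopt = fmin_bfgs(rosen, x0, fprime=rosen_der, disp=False)
#     >>> np.allclose(xopt, 1.0, atol=1e-4)
#     True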
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable, ``f(x, *args)``
Objective function to be minimized. Here `x` must be a 1-D array of
the variables that are to be changed in the search for a minimum, and
`args` are the other (fixed) parameters of `f`.
x0 : ndarray
A user-supplied initial estimate of `xopt`, the optimal value of `x`.
It must be a 1-D array of values.
fprime : callable, ``fprime(x, *args)``, optional
A function that returns the gradient of `f` at `x`. Here `x` and `args`
are as described above for `f`. The returned value must be a 1-D array.
Defaults to None, in which case the gradient is approximated
numerically (see `epsilon`, below).
args : tuple, optional
Parameter values passed to `f` and `fprime`. Must be supplied whenever
additional fixed parameters are needed to completely specify the
functions `f` and `fprime`.
gtol : float, optional
Stop when the norm of the gradient is less than `gtol`.
norm : float, optional
Order to use for the norm of the gradient
(``-np.Inf`` is min, ``np.Inf`` is max).
epsilon : float or ndarray, optional
Step size(s) to use when `fprime` is approximated numerically. Can be a
scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
floating point machine precision. Usually ``sqrt(eps)`` is about
1.5e-8.
maxiter : int, optional
Maximum number of iterations to perform. Default is ``200 * len(x0)``.
full_output : bool, optional
If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
addition to `xopt`. See the Returns section below for additional
information on optional return values.
disp : bool, optional
If True, return a convergence message, followed by `xopt`.
retall : bool, optional
If True, add to the returned values the results of each iteration.
callback : callable, optional
An optional user-supplied function, called after each iteration.
Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float, optional
Minimum value found, f(xopt). Only returned if `full_output` is True.
func_calls : int, optional
The number of function_calls made. Only returned if `full_output`
is True.
grad_calls : int, optional
The number of gradient calls made. Only returned if `full_output` is
True.
warnflag : int, optional
Integer value with warning status, only returned if `full_output` is
True.
0 : Success.
1 : The maximum number of iterations was exceeded.
2 : Gradient and/or function calls were not changing. May indicate
that precision was lost, i.e., the routine did not converge.
allvecs : list of ndarray, optional
List of arrays, containing the results at each iteration.
Only returned if `retall` is True.
See Also
--------
minimize : common interface to all `scipy.optimize` algorithms for
unconstrained and constrained minimization of multivariate
functions. It provides an alternative way to call
``fmin_cg``, by specifying ``method='CG'``.
Notes
-----
This conjugate gradient algorithm is based on that of Polak and Ribiere
[1]_.
Conjugate gradient methods tend to work better when:
1. `f` has a unique global minimizing point, and no local minima or
other stationary points,
2. `f` is, at least locally, reasonably well approximated by a
quadratic function of the variables,
3. `f` is continuous and has a continuous gradient,
4. `fprime` is not too large, e.g., has a norm less than 1000,
5. The initial guess, `x0`, is reasonably close to `f` 's global
minimizing point, `xopt`.
References
----------
.. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
Examples
--------
Example 1: seek the minimum value of the expression
``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
of the parameters and an initial guess ``(u, v) = (0, 0)``.
>>> args = (2, 3, 7, 8, 9, 10) # parameter values
>>> def f(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
>>> def gradf(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... gu = 2*a*u + b*v + d # u-component of the gradient
... gv = b*u + 2*c*v + e # v-component of the gradient
... return np.asarray((gu, gv))
>>> x0 = np.asarray((0, 0)) # Initial guess.
>>> from scipy import optimize
>>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res1
array([-1.80851064, -0.25531915])
Example 2: solve the same problem using the `minimize` function.
(This `myopts` dictionary shows all of the available options,
although in practice only non-default values would be needed.
The returned value will be a dictionary.)
>>> opts = {'maxiter' : None, # default value.
... 'disp' : True, # non-default value.
... 'gtol' : 1e-5, # default value.
... 'norm' : np.inf, # default value.
... 'eps' : 1.4901161193847656e-08} # default value.
>>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
... method='CG', options=opts)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res2.x # minimum found
array([-1.80851064, -0.25531915])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
xk = x0
# Sets the initial step guess to dx ~ 1
old_fval = f(xk)
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
if retall:
allvecs = [xk]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
deltak = numpy.dot(gfk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xk = xk + alpha_k * pk
if retall:
allvecs.append(xk)
if gfkp1 is None:
gfkp1 = myfprime(xk)
yk = gfkp1 - gfk
beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
pk = -gfkp1 + beta_k * pk
gfk = gfkp1
gnorm = vecnorm(gfk, ord=norm)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
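# A minimal hedged sketch (not from the original source): the Polak-Ribiere+
# update used in the loop above, shown standalone. Given consecutive
# gradients gfk and gfkp1 as numpy arrays, beta is clipped at zero so the
# new search direction remains a descent direction. The helper name is
# illustrative only.
def _example_pr_plus_beta(gfk, gfkp1):
    yk = gfkp1 - gfk                      # change in gradient
    deltak = numpy.dot(gfk, gfk)          # squared norm of previous gradient
    return max(0, numpy.dot(yk, gfkp1) / deltak)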
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
    Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
    is provided, then `fhess_p` will be ignored. If neither `fhess`
    nor `fhess_p` is provided, then the hessian product will be
    approximated using finite differences on `fprime`. `fhess_p`
    must compute the hessian times an arbitrary vector.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
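# A minimal hedged sketch (not from the original source): minimizing the
# Rosenbrock function with fmin_ncg and the Hessian-vector product helper
# defined elsewhere in this module. The starting point is arbitrary; the
# iterates should converge to the all-ones vector.
def _example_fmin_ncg():
    x = fmin_ncg(rosen, [0.8, 1.2, 0.7], fprime=rosen_der,
                 fhess_p=rosen_hess_prod, avextol=1e-8, disp=0)
    return x  # approximately array([1., 1., 1.])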
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Note that the `jac` parameter (Jacobian) is required.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
f = fun
fprime = jac
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
xtol = len(x0) * avextol
update = [2 * xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
old_fval = f(x0)
old_old_fval = None
float64eps = numpy.finfo(numpy.float64).eps
warnflag = 0
while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter):
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = numpy.add.reduce(numpy.abs(b))
eta = numpy.min([0.5, numpy.sqrt(maggrad)])
termcond = eta * maggrad
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = numpy.dot(ri, ri)
if fhess is not None: # you want to compute hessian once.
A = fhess(*(xk,) + args)
hcalls = hcalls + 1
while numpy.add.reduce(numpy.abs(ri)) > termcond:
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = numpy.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = numpy.dot(psupi, Ap)
if 0 <= curv <= 3 * float64eps:
break
elif curv < 0:
if (i > 0):
break
else:
# fall back to steepest descent direction
xsupi = dri0 / (-curv) * b
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = numpy.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update numpy.dot(ri,ri) for next time.
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
nhev=hcalls, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
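# A minimal hedged sketch (not from the original source): the inner loop
# above is an ordinary conjugate-gradient solve of H p = -g, truncated
# early. This standalone version assumes a fixed symmetric positive-definite
# H (a numpy array) and that no negative curvature is encountered.
def _example_inner_cg(H, g, tol=1e-10, maxiter=50):
    p = zeros(len(g), dtype=float)
    r = g.copy()                      # residual of H p + g at p = 0
    d = -r
    for _ in range(maxiter):
        if numpy.add.reduce(numpy.abs(r)) <= tol:
            break
        Hd = numpy.dot(H, d)
        alpha = numpy.dot(r, r) / numpy.dot(d, Hd)
        p = p + alpha * d
        r_new = r + alpha * Hd
        d = -r_new + (numpy.dot(r_new, r_new) / numpy.dot(r, r)) * d
        r = r_new
    return p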
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing).
"""
options = {'xatol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
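# A minimal hedged sketch (not from the original source): bounded scalar
# minimization of a simple quadratic; the minimizer of (x - 2)**2 on
# [0, 10] is x = 2.
def _example_fminbound():
    xopt = fminbound(lambda x: (x - 2.0) ** 2, 0.0, 10.0, xtol=1e-8)
    return xopt  # approximately 2.0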
def _minimize_scalar_bounded(func, bounds, args=(),
xatol=1e-5, maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimisation bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5 * (3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean * (b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
ffulc = fnfc = fx
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if disp > 2:
print(" ")
print(header)
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
golden = 1
# Check for parabolic fit
if numpy.abs(e) > tol1:
golden = 0
r = (xf - nfc) * (fx - ffulc)
q = (xf - fulc) * (fx - fnfc)
p = (xf - fulc) * q - (xf - nfc) * r
q = 2.0 * (q - r)
if q > 0.0:
p = -p
q = numpy.abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
(p < q * (b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = numpy.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1 * si
else: # do a golden section step
golden = 1
if golden: # Do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = numpy.sign(rat) + (rat == 0)
x = xf + si * numpy.max([numpy.abs(rat), tol1])
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if num >= maxfun:
flag = 1
break
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xatol, disp)
result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.'}.get(flag, ''),
x=xf, nfev=num)
return result
class Brent:
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
# need to rethink design of set_bracket (new options, etc)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be "
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
# set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls = 1
iter = 0
while (iter < self.maxiter):
tol1 = self.tol * numpy.abs(x) + _mintol
tol2 = 2.0 * tol1
xmid = 0.5 * (a + b)
# check for convergence
if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
break
# XXX In the first iteration, rat is only bound in the true case
# of this conditional. This used to cause an UnboundLocalError
# (gh-4140). It should be set before the if (but to what?).
if (numpy.abs(deltax) <= tol1):
if (x >= xmid):
deltax = a - x # do a golden section step
else:
deltax = b - x
rat = _cg * deltax
else: # do a parabolic step
tmp1 = (x - w) * (fx - fv)
tmp2 = (x - v) * (fx - fw)
p = (x - v) * tmp2 - (x - w) * tmp1
tmp2 = 2.0 * (tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = numpy.abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
(numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
rat = p * 1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0:
rat = tol1
else:
rat = -tol1
else:
if (x >= xmid):
deltax = a - x # if it's not do a golden section step
else:
deltax = b - x
rat = _cg * deltax
if (numpy.abs(rat) < tol1): # update by at least tol1
if rat >= 0:
u = x + tol1
else:
u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x):
a = u
else:
b = u
if (fu <= fw) or (w == x):
v = w
w = u
fv = fw
fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u
fv = fu
else:
if (u >= x):
a = x
else:
b = x
v = w
w = x
x = u
fv = fw
fw = fx
fx = fu
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
Given a function of one-variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args : tuple, optional
Additional arguments (if present).
brack : tuple, optional
Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) <
func(xa), func(xc) or a pair (xa,xb) which are used as a
starting interval for a downhill bracket search (see
        `bracket`). Providing the pair (xa, xb) does not guarantee
        that the obtained solution will satisfy xa <= x <= xb.
tol : float, optional
Stop if between iteration change is less than `tol`.
full_output : bool, optional
If True, return all output args (xmin, fval, iter,
funcalls).
maxiter : int, optional
Maximum number of iterations in solution.
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
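# A minimal hedged sketch (not from the original source): Brent minimization
# with an explicit bracketing triple around the minimum of (x - 1.5)**2.
# The triple satisfies xa < xb < xc with f(xb) < f(xa) and f(xb) < f(xc).
def _example_brent():
    xmin, fval, nit, nfev = brent(lambda x: (x - 1.5) ** 2,
                                  brack=(0.0, 1.0, 3.0), full_output=True)
    return xmin  # approximately 1.5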
def _minimize_scalar_brent(func, brack=None, args=(),
xtol=1.48e-8, maxiter=500,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
_check_unknown_options(unknown_options)
tol = xtol
if tol < 0:
raise ValueError('tolerance should be >= 0, got %r' % tol)
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
"""
Return the minimum of a function of one variable.
Given a function of one variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple, optional
Additional arguments (if present), passed to func.
brack : tuple, optional
Triple (a,b,c), where (a<b<c) and func(b) <
func(a),func(c). If bracket consists of two numbers (a,
        c), then they are assumed to be a starting interval for a
        downhill bracket search (see `bracket`); the solution found
        is not guaranteed to satisfy a <= x <= c.
tol : float, optional
x tolerance stop criterion
full_output : bool, optional
If True, return optional outputs.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
Uses analog of bisection method to decrease the bracketed
interval.
"""
options = {'xtol': tol}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
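# A minimal hedged sketch (not from the original source): golden-section
# search over the same kind of bracketing triple; derivative-free and
# slower than `brent`, since it never attempts parabolic interpolation.
def _example_golden():
    xmin = golden(lambda x: (x - 1.5) ** 2, brack=(0.0, 1.0, 3.0))
    return xmin  # approximately 1.5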
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, **unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399
_gC = 1.0 - _gR
x3 = xc
x0 = xa
if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
x1 = xb
x2 = xb + _gC * (xc - xb)
else:
x2 = xb
x1 = xb - _gC * (xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))):
if (f2 < f1):
x0 = x1
x1 = x2
x2 = _gR * x1 + _gC * x3
f1 = f2
f2 = func(*((x2,) + args))
else:
x3 = x2
x2 = x1
x1 = _gR * x2 + _gC * x0
f2 = f1
f1 = func(*((x1,) + args))
funcalls += 1
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
return OptimizeResult(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function:
    f(xa) > f(xb) < f(xc). Note that the bracket found this way is
    not guaranteed to satisfy xa <= x <= xb.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
"""
_gold = 1.618034
_verysmall_num = 1e-21
fa = func(*(xa,) + args)
fb = func(*(xb,) + args)
if (fa < fb): # Switch so fa > fb
xa, xb = xb, xa
fa, fb = fb, fa
xc = xb + _gold * (xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
tmp1 = (xb - xa) * (fb - fc)
tmp2 = (xb - xc) * (fb - fa)
val = tmp2 - tmp1
if numpy.abs(val) < _verysmall_num:
denom = 2.0 * _verysmall_num
else:
denom = 2.0 * val
w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
wlim = xb + grow_limit * (xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc) * (xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb
xb = w
fa = fb
fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc
xc = w
w = xc + _gold * (xc - xb)
fb = fc
fc = fw
fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb
xb = xc
xc = w
fa = fb
fb = fc
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
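# A minimal hedged sketch (not from the original source): auto-bracketing a
# minimum downhill from two starting points; on return, fa > fb < fc and
# xb lies between xa and xc.
def _example_bracket():
    xa, xb, xc, fa, fb, fc, funcalls = bracket(lambda x: (x - 4.0) ** 2,
                                               xa=0.0, xb=1.0)
    return xa, xb, xc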
def _linesearch_powell(func, p, xi, tol=1e-3):
"""Line-search algorithm using fminbound.
Find the minimium of the function ``func(x0+ alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha*xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method. This method
only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial direction set.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, fopt, xi, direc, iter, funcalls, and
warnflag are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' `method` in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop
merely iterates over the inner loop. The inner loop minimizes
over each current direction in the direction set. At the end
of the inner loop, if certain conditions are met, the direction
that gave the largest decrease is dropped and replaced with
the difference between the current estimated x and the estimated
x from the beginning of the inner-loop.
    The technical conditions for replacing the direction of greatest
    decrease amount to checking that
    1. No further gain can be made along the direction of greatest decrease
       from that iteration.
    2. The direction of greatest decrease accounted for a sufficiently large
       fraction of the decrease in the function value from that iteration of
       the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
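# A minimal hedged sketch (not from the original source): derivative-free
# Powell minimization of the Rosenbrock function defined in this module.
def _example_fmin_powell():
    x = fmin_powell(rosen, [0.5, 0.5, 0.5], disp=0)
    return x  # approximately array([1., 1., 1.])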
def _minimize_powell(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
if maxiter is None:
maxiter = N * 1000
if maxfun is None:
maxfun = N * 1000
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = list(range(N))
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol * 100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
if 2.0 * (fx - fval) <= bnd:
break
if fcalls[0] >= maxfun:
break
if iter >= maxiter:
break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls[0])
x = squeeze(x)
result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e. computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
        specified. See `Note 2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages.
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, `i.e.`, ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, `i.e.`, to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary. Thus, if a
minimum only needs to be found over the provided grid points, make
sure to pass in `finish=None`.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if type(lrange[k]) is not type(slice(None)):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
def _scalarfunc(*params):
params = squeeze(asarray(params))
return func(params, *args)
vecfunc = vectorize(_scalarfunc)
grid = mgrid[lrange]
if (N == 1):
grid = (grid,)
Jout = vecfunc(*grid)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g. if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
def show_options(solver=None, method=None, disp=True):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', or 'linprog'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
corresponds to methods' names of respective solver (e.g. 'BFGS' for
'minimize').
disp : bool, optional
Whether to print the result rather than returning it.
Returns
-------
text
        Either None (for ``disp=True``) or the text string (``disp=False``).
Notes
-----
The solver-specific methods are:
`scipy.optimize.minimize`
- :ref:`Nelder-Mead <optimize.minimize-neldermead>`
- :ref:`Powell <optimize.minimize-powell>`
- :ref:`CG <optimize.minimize-cg>`
- :ref:`BFGS <optimize.minimize-bfgs>`
- :ref:`Newton-CG <optimize.minimize-newtoncg>`
- :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
- :ref:`TNC <optimize.minimize-tnc>`
- :ref:`COBYLA <optimize.minimize-cobyla>`
- :ref:`SLSQP <optimize.minimize-slsqp>`
- :ref:`dogleg <optimize.minimize-dogleg>`
- :ref:`trust-ncg <optimize.minimize-trustncg>`
`scipy.optimize.root`
- :ref:`hybr <optimize.root-hybr>`
- :ref:`lm <optimize.root-lm>`
- :ref:`broyden1 <optimize.root-broyden1>`
- :ref:`broyden2 <optimize.root-broyden2>`
- :ref:`anderson <optimize.root-anderson>`
- :ref:`linearmixing <optimize.root-linearmixing>`
- :ref:`diagbroyden <optimize.root-diagbroyden>`
- :ref:`excitingmixing <optimize.root-excitingmixing>`
- :ref:`krylov <optimize.root-krylov>`
- :ref:`df-sane <optimize.root-dfsane>`
`scipy.optimize.minimize_scalar`
- :ref:`brent <optimize.minimize_scalar-brent>`
- :ref:`golden <optimize.minimize_scalar-golden>`
- :ref:`bounded <optimize.minimize_scalar-bounded>`
`scipy.optimize.linprog`
- :ref:`simplex <optimize.linprog-simplex>`
"""
import textwrap
doc_routines = {
'minimize': (
('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
('cg', 'scipy.optimize.optimize._minimize_cg'),
('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
            ('newton-cg', 'scipy.optimize.optimize._minimize_newtoncg'),
('powell', 'scipy.optimize.optimize._minimize_powell'),
('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
('tnc', 'scipy.optimize.tnc._minimize_tnc'),
('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
),
'root': (
('hybr', 'scipy.optimize.minpack._root_hybr'),
('lm', 'scipy.optimize._root._root_leastsq'),
('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
('anderson', 'scipy.optimize._root._root_anderson_doc'),
('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
('krylov', 'scipy.optimize._root._root_krylov_doc'),
('df-sane', 'scipy.optimize._spectral._root_df_sane'),
),
'linprog': (
('simplex', 'scipy.optimize._linprog._linprog_simplex'),
),
'minimize_scalar': (
('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
),
}
if solver is None:
text = ["\n\n\n========\n", "minimize\n", "========\n"]
text.append(show_options('minimize', disp=False))
text.extend(["\n\n===============\n", "minimize_scalar\n",
"===============\n"])
text.append(show_options('minimize_scalar', disp=False))
text.extend(["\n\n\n====\n", "root\n",
"====\n"])
text.append(show_options('root', disp=False))
text.extend(['\n\n\n=======\n', 'linprog\n',
'=======\n'])
text.append(show_options('linprog', disp=False))
text = "".join(text)
else:
solver = solver.lower()
if solver not in doc_routines:
raise ValueError('Unknown solver %r' % (solver,))
if method is None:
text = []
for name, _ in doc_routines[solver]:
text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
text.append(show_options(solver, name, disp=False))
text = "".join(text)
else:
methods = dict(doc_routines[solver])
if method not in methods:
raise ValueError("Unknown method %r" % (method,))
name = methods[method]
# Import function object
parts = name.split('.')
mod_name = ".".join(parts[:-1])
__import__(mod_name)
obj = getattr(sys.modules[mod_name], parts[-1])
# Get doc
doc = obj.__doc__
if doc is not None:
text = textwrap.dedent(doc).strip()
else:
text = ""
if disp:
print(text)
return
else:
return text
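# A minimal hedged sketch (not from the original source): fetching the
# CG-specific options documented in _minimize_cg above. Note that the
# method keys in doc_routines are the lowercase names.
def _example_show_options():
    return show_options(solver='minimize', method='cg', disp=False)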
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = fmin(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print()
print("Powell Direction Set Method")
print("===========================")
start = time.time()
x = fmin_powell(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print()
print("Nonlinear CG")
print("============")
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print(x)
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print()
print("BFGS Quasi-Newton")
print("=================")
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print()
print("BFGS approximate gradient")
print("=========================")
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print(x)
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print()
print("Newton-CG with Hessian product")
print("==============================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print()
print("Newton-CG with full Hessian")
print("===========================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with full hessian')
print()
print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds")
print("===========\t\t\t =========")
for k in range(len(algor)):
print(algor[k], "\t -- ", times[k])
if __name__ == "__main__":
main()
|
chatcannon/scipy
|
scipy/optimize/optimize.py
|
Python
|
bsd-3-clause
| 96,767
|
[
"Gaussian"
] |
a0778a818832eb12433448927bd82d89cd906de68cf5bd76a305d61192d45c7c
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test unrestrictied single point logfiles in cclib"""
import os
import unittest
import numpy
from skip import skipForParser
from skip import skipForLogfile
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericSPunTest(unittest.TestCase):
"""Generic unrestricted single point unittest"""
def testnatom(self):
"""Is the number of atoms equal to 20?"""
self.assertEqual(self.data.natom, 20)
def testatomnos(self):
"""Are the atomnos correct?"""
self.assertTrue(numpy.alltrue([numpy.issubdtype(atomno, numpy.signedinteger)
for atomno in self.data.atomnos]))
self.assertEqual(self.data.atomnos.shape, (20,) )
self.assertEqual(sum(self.data.atomnos==6) + sum(self.data.atomnos==1), 20)
def testatomcoords(self):
"""Are the dimensions of atomcoords 1 x natom x 3?"""
self.assertEqual(self.data.atomcoords.shape,(1,self.data.natom,3))
@skipForParser('Jaguar', 'Data file does not contain enough information')
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 2 x nmo x nbasis?"""
if hasattr(self.data, "mocoeffs"):
self.assertIsInstance(self.data.mocoeffs, list)
self.assertEqual(len(self.data.mocoeffs), 2)
self.assertEqual(self.data.mocoeffs[0].shape,
(self.data.nmo, self.data.nbasis))
self.assertEqual(self.data.mocoeffs[1].shape,
(self.data.nmo, self.data.nbasis))
@skipForParser('Jaguar', 'Data file does not contain enough information')
@skipForParser('DALTON', 'mocoeffs not implemented yet')
def testfornsoormo(self):
"""Do we have NSOs or MOs?"""
self.assertTrue(
hasattr(self.data, "nsocoeffs") or hasattr(self.data, "mocoeffs")
)
def testdimnsoccnos(self):
"""Are the dimensions of nsooccnos equal to 2 x nmo?"""
if hasattr(self.data, "nsooccnos"):
self.assertIsInstance(self.data.nsooccnos, list)
self.assertIsInstance(self.data.nsooccnos[0], list)
self.assertIsInstance(self.data.nsooccnos[1], list)
self.assertEqual(len(self.data.nsooccnos), 2)
self.assertEqual(len(self.data.nsooccnos[0]), self.data.nmo)
self.assertEqual(len(self.data.nsooccnos[1]), self.data.nmo)
def testdimnsocoeffs(self):
"""Are the dimensions of nsocoeffs equal to 2 x nmo x nmo?"""
if hasattr(self.data, "nsocoeffs"):
self.assertIsInstance(self.data.nsocoeffs, list)
self.assertIsInstance(self.data.nsocoeffs[0], numpy.ndarray)
self.assertIsInstance(self.data.nsocoeffs[1], numpy.ndarray)
self.assertEqual(len(self.data.nsocoeffs), 2)
self.assertEqual(self.data.nsocoeffs[0].shape, (self.data.nmo, self.data.nmo))
self.assertEqual(self.data.nsocoeffs[1].shape, (self.data.nmo, self.data.nmo))
@skipForParser('Molcas','The parser is still being developed so we skip this test')
def testcharge_and_mult(self):
"""Are the charge and multiplicity correct?"""
self.assertEqual(self.data.charge, 1)
self.assertEqual(self.data.mult, 2)
def testhomos(self):
"""Are the homos correct?"""
msg = "%s != array([34,33],'i')" % numpy.array_repr(self.data.homos)
numpy.testing.assert_array_equal(self.data.homos, numpy.array([34,33],"i"), msg)
def testmoenergies(self):
"""Are the dims of the moenergies equals to 2 x nmo?"""
if hasattr(self.data, "moenergies"):
self.assertEqual(len(self.data.moenergies), 2)
self.assertEqual(len(self.data.moenergies[0]), self.data.nmo)
self.assertEqual(len(self.data.moenergies[1]), self.data.nmo)
@skipForParser('FChk', 'Fchk files do not have a section for symmetry')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Molpro', '?')
@skipForParser('ORCA', 'ORCA has no support for symmetry yet')
def testmosyms(self):
"""Are the dims of the mosyms equals to 2 x nmo?"""
shape = (len(self.data.mosyms), len(self.data.mosyms[0]))
self.assertEqual(shape, (2, self.data.nmo))
class GenericROSPTest(GenericSPunTest):
"""Customized restricted open-shell single point unittest"""
@skipForParser('DALTON', 'mocoeffs not implemented yet')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 1 x nmo x nbasis?"""
self.assertEqual(type(self.data.mocoeffs), type([]))
self.assertEqual(len(self.data.mocoeffs), 1)
self.assertEqual(self.data.mocoeffs[0].shape,
(self.data.nmo, self.data.nbasis))
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testhomos(self):
"""Are the HOMO indices equal to 34 and 33 (one more alpha electron
than beta electron)?
"""
msg = "%s != array([34, 33], 'i')" % numpy.array_repr(self.data.homos)
numpy.testing.assert_array_equal(self.data.homos, numpy.array([34, 33], "i"), msg)
@skipForParser('QChem', 'prints 2 sets of different MO energies?')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testmoenergies(self):
"""Are the dims of the moenergies equals to 1 x nmo?"""
self.assertEqual(len(self.data.moenergies), 1)
self.assertEqual(len(self.data.moenergies[0]), self.data.nmo)
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testmosyms(self):
"""Are the dims of the mosyms equals to 1 x nmo?"""
shape = (len(self.data.mosyms), len(self.data.mosyms[0]))
self.assertEqual(shape, (1, self.data.nmo))
class GamessUK70SPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 2 x (homos+6) x nbasis?"""
self.assertEqual(type(self.data.mocoeffs), type([]))
self.assertEqual(len(self.data.mocoeffs), 2)
# This is only an issue in version 7.0 (and before?), since in the version 8.0
# logfile all eigenvectors are happily printed.
shape_alpha = (self.data.homos[0]+6, self.data.nbasis)
shape_beta = (self.data.homos[1]+6, self.data.nbasis)
self.assertEqual(self.data.mocoeffs[0].shape, shape_alpha)
self.assertEqual(self.data.mocoeffs[1].shape, shape_beta)
def testnooccnos(self):
"""Are natural orbital occupation numbers the right size?"""
self.assertEqual(self.data.nooccnos.shape, (self.data.nmo, ))
class GamessUK80SPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testnooccnos(self):
"""Are natural orbital occupation numbers the right size?"""
self.assertEqual(self.data.nooccnos.shape, (self.data.nmo, ))
class GaussianSPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testatomnos(self):
"""Does atomnos have the right dimension (20)?"""
size = len(self.data.atomnos)
self.assertEqual(size, 20)
def testatomcharges(self):
"""Are atomcharges (at least Mulliken) consistent with natom and sum to one?"""
for type_ in set(['mulliken'] + list(self.data.atomcharges.keys())):
charges = self.data.atomcharges[type_]
self.assertEqual(len(charges), self.data.natom)
self.assertAlmostEqual(sum(charges), 1.0, delta=0.001)
def testatomspins(self):
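        """Are atomspins (at least Mulliken) consistent with natom, and do they sum to one?"""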
spins = self.data.atomspins['mulliken']
self.assertEqual(len(spins), self.data.natom)
self.assertAlmostEqual(sum(spins), 1.0, delta=0.001)
class JaguarSPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testmoenergies(self):
"""Are the dims of the moenergies equal to 2 x homos+11?"""
self.assertEqual(len(self.data.moenergies), 2)
self.assertEqual(len(self.data.moenergies[0]), self.data.homos[0]+11)
self.assertEqual(len(self.data.moenergies[1]), self.data.homos[1]+11)
def testmosyms(self):
"""Are the dims of the mosyms equals to 2 x nmo?"""
shape0 = (len(self.data.mosyms), len(self.data.mosyms[0]))
shape1 = (len(self.data.mosyms), len(self.data.mosyms[1]))
self.assertEqual(shape0, (2, self.data.homos[0]+11))
self.assertEqual(shape1, (2, self.data.homos[1]+11))
if __name__=="__main__":
import sys
sys.path.insert(1, os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['SPun'])
suite.testall()
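# A minimal hedged sketch (not part of the original test suite): loading a
# logfile with cclib's generic ccread entry point and inspecting the
# attributes exercised above. The filename is a placeholder.
def _example_ccread(filename="dvb_un_sp.out"):
    from cclib.io import ccread
    data = ccread(filename)
    return data.natom, data.homos  # e.g. 20 and array([34, 33]) for these tests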
|
cclib/cclib
|
test/data/testSPun.py
|
Python
|
bsd-3-clause
| 9,713
|
[
"Dalton",
"Jaguar",
"MOLCAS",
"Molpro",
"ORCA",
"TURBOMOLE",
"cclib"
] |
38dde66487d5539f6f029358f2abac47c1ab9c9268e94b91b965d5ff2a2fe9f5
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
mpirun_exe = "mpirun -n {n} {placement} {program}"
""" Command-line to launch external mpi programs. """
def machine_dependent_call_modifier(formatter=None, comm=None, env=None):
""" Machine dependent modifications.
    This is a fairly catch-all place to put machine-dependent logic for mpi
    calls, including mpi placement.
The formatter used to format the :py:data:`~pylada.mpirun_exe` string is
passed as the first argument. It can be modified *in-place* for machine
dependent stuff, or for mpi placement. The latter case occurs only if
``comm`` has a non-empty ``machines`` attribute. In that case,
:py:attr:`~pylada.process.mpi.machines` is a dictionary mapping the
    hostnames to the number of procs on that host. Finally, a dictionary
containing the environment variables can also be passed. It should be
modified *in-place*.
By default, the 'placement' value of the formatter is modified to reflect
the nodefile of a specific mpi placement. This occurs only if
mpi-placement is requested (eg `comm.machines` exists and is not empty).
    This function is called only from :py:func:`pylada.launch_program`. If
    calls fail, it is a good idea to copy :py:func:`pylada.launch_program`
    into your $HOME/.pylada and debug it from there.
:param dict formatter:
Dictionary used in formatting the command line of
      :py:func:`~pylada.launch`. It should be modified *in-place*.
:param comm:
Communicator used in this particular calculation. At this point in
      :py:func:`~pylada.launch_program`, dictionary data from the
communicator have been copied to the formatter. It is passed here in
case its attributes :py:attr:`~pylada.process.mpi.Communicator.machines`
or the nodefile returned by
      :py:meth:`~pylada.process.mpi.Communicator.nodefile`
is needed. However, the communicator itself should not be modified.
:type comm: :py:class:`~pylada.process.mpi.Communicator`
:param dict env:
Dictionary of environment variables in which to run the call.
:return: ignored
"""
pass
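# A minimal hedged sketch (not from the original source): how the mpirun_exe
# template defined above gets filled in by a formatter dictionary. The
# program name "vasp" is a placeholder.
def _example_format_mpirun_exe():
    formatter = {'n': 8, 'placement': '', 'program': 'vasp'}
    return mpirun_exe.format(**formatter)  # "mpirun -n 8  vasp"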
def machine_dependent_call_modifier_nodefiles(formatter=None, comm=None, env=None):
""" Version of machine_dependent_call_modifier that creates a nodefile """
import logging
from pylada import logger
if len(getattr(comm, 'machines', [])) != 0:
nfile = comm.nodefile()
formatter['placement'] = "-machinefile {0}".format(nfile)
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("config/mpi: machine_dep_call_mod: nodefile: \"%s\"" % nfile)
            with open(nfile) as fin:
                logger.debug("config/mpi: machine_dep_call_mod: nodefile contents: \"%s\"" % fin.read())
def modify_global_comm(communicator):
""" Modifies global communicator so placement can be done correctly.
This function is called by :py:func:`create_global_comm`. It can be used
to modify the global communicator to work better with a custom placement
function.
"""
pass
def launch_program(cmdl, comm=None, formatter=None, env=None,
stdout=None, stderr=None, stdin=None, outdir=None):
""" Command used to launch a program.
This function launches external programs for Pylada. It is included as a
    global so that it can be adapted to different computing environments. It
also makes it easier to debug Pylada's mpi configuration when installing on
a new machine.
.. note::
The number one configuration problem is an incorrect
:py:data:`~pylada.mpirun_exe`.
.. note::
      The number two configuration problem is mpi-placement (eg how to launch
      two different mpi programs simultaneously in one PBS/SLURM job). First
      read the manual for the mpi environment on the particular machine Pylada
      is installed on. Then adapt
      :py:func:`~pylada.machine_dependent_call_modifier` by redeclaring it
      in $HOME/.pylada.
    :param str cmdl:
      Command-line string. It will be formatted using ``formatter`` or
      ``comm`` if either is present. Otherwise, it should be the exact
      (bash) command line.
:param comm:
      Should contain everything needed to launch an mpi call.
In practice, it is copied from :py:data:`~pylada.default_comm` and
modified for the purpose of a particular call (e.g. could use fewer
than all available procs)
:type comm: :py:class:`~pylada.process.mpi.Communicator`
:param dict formatter:
Dictionary with which to format the communicator. If ``comm`` is
present, then it will be updated with ``comm``'s input.
:param dict env:
      Dictionary containing the environment variables in which to run the call.
:param stdout:
File object to which to hook-up the standard output. See Popen_.
:param stderr:
File object to which to hook-up the standard error. See Popen_.
:param str outdir:
Path to the working directory.
    .. _Popen: http://docs.python.org/library/subprocess.html#popen-constructor
"""
from shlex import split as shlex_split
from subprocess import Popen
from pylada import machine_dependent_call_modifier
from pylada.misc import local_path
# At this point formatter is {"program": vasp}
# and cmdl is "mpirun -n {n} {placement} {program}"
# Set in formatter: 'placement': '', 'ppn': 8, 'n': 8
# make sure that the formatter contains stuff from the communicator, eg the
# number of processes.
if comm is not None and formatter is not None:
formatter.update(comm)
# Set in formatter: 'placement': '-machinefile /home.../pylada_commtempfile'
# Stuff that will depend on the supercomputer.
machine_dependent_call_modifier(formatter, comm, env)
# if a formatter exists, then use it on the cmdl string.
if formatter is not None:
cmdl = cmdl.format(**formatter)
# otherwise, if comm is not None, use that.
elif comm is not None:
cmdl = cmdl.format(**comm)
# Split command from string to list
cmdl = shlex_split(cmdl)
# makes sure the directory exists:
local_path(outdir).ensure(dir=True)
# Finally, start the process.
popen = Popen(cmdl, stdout=stdout, stderr=stderr, stdin=stdin,
cwd=outdir, env=env)
popen.wait()
#if testValidProgram: popen.wait()
return popen
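# Illustrative usage sketch (hedged; 'pw.x' is a hypothetical program name):
# a formatter seeded from the default communicator, launched in the current
# directory.
#
#     >>> from pylada import mpirun_exe, default_comm
#     >>> formatter = dict(default_comm)
#     >>> formatter['program'] = 'pw.x'
#     >>> process = launch_program(mpirun_exe, formatter=formatter, outdir='.')
#     >>> process.returncode  # launch_program waits before returning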
default_comm = {'n': 2, 'ppn': 4, 'placement': ''}
""" Default communication dictionary.
should contain all key-value pairs used in :py:data:`mpirun_exe`. In a
script which manages mpi processes, it is also the global communicator. In
other words, it is the one which at the start of the application is given
knowledge of the machines (via :py:func:`~pylada.create_global_comm`). Other
communicators will have to acquire machines from this one. In that case, it
is likely that 'n' is modified.
"""
# pbs/slurm related stuff.
queues = ()
""" List of slurm or pbs queues allowed for use.
This is used by ipython's %launch magic function.
It is not required for slurm systems.
If empty, then %launch will not have a queue option.
"""
features = ['']
""" List of slurm or pbs features allowed for use.
This is used by ipython's %launch magic function.
It is not required for slurm systems.
If empty, then %launch will not have a feature option.
"""
###accounts = ['CSC000', 'BES000']
accounts = ['']
""" List of slurm or pbs accounts allowed for use.
This is used by ipython's %launch magic function.
It is not required for slurm systems.
If empty, then %launch will not have an account option.
"""
debug_queue = "queue", "debug"
""" How to select the debug queue.
First part of the tuple is the keyword argument to modify when calling
the pbs job, and the second is its value.
"""
qsub_exe = "qsub"
""" Qsub/sbatch executable. """
qsub_array_exe = None
""" Qsub for job arrays.
If not None, it should be a tuple consisting of the command to launch job
arrays and the name of the environment variable holding the job index.
>>> qsub_array_exe = 'qsub -J 1-{nbjobs}', '$PBS_ARRAY_INDEX'
The format ``{array}`` will receive the arrays to launch.
"""
# qdel_exe = 'scancel'
qdel_exe = 'mjobctl -c'
""" Qdel/scancel executable. """
default_pbs = {
# 'account': accounts[0],
'walltime': "00:30:00",
'nnodes': 1,
'ppn': 1,
'header': '',
'footer': ''
}
""" Defaults parameters filling the pbs script. """
# pbs_string = '''#!/bin/bash
# SBATCH --account={account}
# SBATCH --time={walltime}
# SBATCH -N {nnodes}
# SBATCH -e {err}
# SBATCH -o {out}
# SBATCH -J {name}
# SBATCH -D {directory}
#
# echo config/mpi.py pbs_string: header: {header}
# echo config/mpi.py pbs_string: scriptcommand: python {scriptcommand}
# echo config/mpi.py pbs_string: footer: {footer}
#
#{header}
# python {scriptcommand}
#{footer}
#
#'''
pbs_string = '''#!/bin/bash
#PBS -q {queue}
#PBS -m n
#PBS -l walltime={walltime}
#PBS -l nodes={nnodes}
#PBS -e {err}
#PBS -o {out}
#PBS -N {name}
#PBS -d {directory}
cd {directory}
{header}
python {scriptcommand}
{footer}
'''
""" Default pbs/slurm script. """
do_multiple_mpi_programs = False
""" Whether to get address of host machines at start of calculation. """
# Figure out machine hostnames for a particular job.
# Can be any programs which outputs each hostname (once per processor),
# preceded by the string "PYLADA MACHINE HOSTNAME:"
figure_out_machines = '''
from socket import gethostname
from mpi4py import MPI
import os
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
hostname = gethostname()
names = comm.gather( hostname, root=0)
if rank == 0:
for nm in names:
print "PYLADA MACHINE HOSTNAME:", nm
'''
|
pylada/pylada-light
|
src/pylada/config/mpi.py
|
Python
|
gpl-3.0
| 11,266
|
[
"CRYSTAL",
"VASP"
] |
f37698b4784f83ba3c7df442e0feaa476ba5fede3ee2ed32640f53766cc7046e
|
# encoding: utf-8
"""
nrnpython implementation of the PyNN API.
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import numpy
import logging
from pyNN import common
from pyNN.parameters import Sequence, ParameterSpace, simplify
from pyNN.standardmodels import StandardCellType
from pyNN.random import RandomDistribution, NativeRNG
from . import simulator
from .recording import Recorder
logger = logging.getLogger("PyNN")
class PopulationMixin(object):
def _set_parameters(self, parameter_space):
"""parameter_space should contain native parameters"""
parameter_space.evaluate(mask=numpy.where(self._mask_local)[0])
for cell, parameters in zip(self, parameter_space):
for name, val in parameters.items():
setattr(cell._cell, name, val)
def _get_parameters(self, *names):
"""
return a ParameterSpace containing native parameters
"""
parameter_dict = {}
for name in names:
if name == 'spike_times': # hack
parameter_dict[name] = [Sequence(getattr(id._cell, name)) for id in self]
else:
parameter_dict[name] = simplify(numpy.array([getattr(id._cell, name) for id in self]))
return ParameterSpace(parameter_dict, shape=(self.local_size,))
def _set_initial_value_array(self, variable, initial_values):
if initial_values.is_homogeneous:
value = initial_values.evaluate(simplify=True)
for cell in self: # only on local node
setattr(cell._cell, "%s_init" % variable, value)
else:
if isinstance(initial_values.base_value, RandomDistribution) and initial_values.base_value.rng.parallel_safe:
local_values = initial_values.evaluate()[self._mask_local]
else:
local_values = initial_values[self._mask_local]
for cell, value in zip(self, local_values):
setattr(cell._cell, "%s_init" % variable, value)
class Assembly(common.Assembly):
__doc__ = common.Assembly.__doc__
_simulator = simulator
class PopulationView(common.PopulationView, PopulationMixin):
__doc__ = common.PopulationView.__doc__
_simulator = simulator
_assembly_class = Assembly
def _get_view(self, selector, label=None):
return PopulationView(self, selector, label)
class Population(common.Population, PopulationMixin):
__doc__ = common.Population.__doc__
_simulator = simulator
_recorder_class = Recorder
_assembly_class = Assembly
def __init__(self, size, cellclass, cellparams=None, structure=None,
initial_values={}, label=None):
        # docstring inherited from common.Population
common.Population.__init__(self, size, cellclass, cellparams,
structure, initial_values, label)
simulator.initializer.register(self)
def _get_view(self, selector, label=None):
return PopulationView(self, selector, label)
def _create_cells(self):
"""
Create cells in NEURON using the celltype of the current Population.
"""
# this method should never be called more than once
# perhaps should check for that
self.first_id = simulator.state.gid_counter
self.last_id = simulator.state.gid_counter + self.size - 1
self.all_cells = numpy.array([id for id in range(self.first_id, self.last_id + 1)],
simulator.ID)
# mask_local is used to extract those elements from arrays that apply to the cells on the current node
# round-robin distribution of cells between nodes
self._mask_local = self.all_cells % simulator.state.num_processes == simulator.state.mpi_rank
if isinstance(self.celltype, StandardCellType):
parameter_space = self.celltype.native_parameters
else:
parameter_space = self.celltype.parameter_space
parameter_space.shape = (self.size,)
parameter_space.evaluate(mask=None)
for i, (id, is_local, params) in enumerate(zip(self.all_cells, self._mask_local, parameter_space)):
self.all_cells[i] = simulator.ID(id)
self.all_cells[i].parent = self
if is_local:
if hasattr(self.celltype, "extra_parameters"):
params.update(self.celltype.extra_parameters)
self.all_cells[i]._build_cell(self.celltype.model, params)
simulator.initializer.register(*self.all_cells[self._mask_local])
simulator.state.gid_counter += self.size
def _native_rset(self, parametername, rand_distr):
"""
'Random' set. Set the value of parametername to a value taken from
rand_distr, which should be a RandomDistribution object.
"""
assert isinstance(rand_distr.rng, NativeRNG)
rng = simulator.h.Random(rand_distr.rng.seed or 0)
native_rand_distr = getattr(rng, rand_distr.name)
rarr = [native_rand_distr(*rand_distr.parameters)] + [rng.repick() for i in range(self.all_cells.size - 1)]
self.tset(parametername, rarr)
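# Illustrative usage sketch (assumes a working NEURON back-end; the cell type
# and parameters below are ordinary PyNN API, not specific to this module):
#
#     >>> import pyNN.neuron as sim
#     >>> sim.setup(timestep=0.025)
#     >>> pop = sim.Population(10, sim.IF_cond_exp(), label="example")
#     >>> pop.initialize(v=-65.0)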
|
anupkdas-nus/global_synapses
|
pyNN-dispackgaes/neuron/populations.py
|
Python
|
gpl-3.0
| 5,219
|
[
"NEURON"
] |
b7d8635e8f6b16789c445c9d9cd275aa2bc420a0b9cc33af0b29f2302f4e64af
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Script to visualize the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SEPARATION_PLANE
from pymatgen.analysis.chemenv.utils.scripts_utils import visualize
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane
import numpy as np
if __name__ == '__main__':
print('+-------------------------------------------------------+\n'
'| Development script of the ChemEnv utility of pymatgen |\n'
'| Visualization of the model coordination environments |\n'
'+-------------------------------------------------------+\n')
allcg = AllCoordinationGeometries()
vis = None
while True:
cg_symbol = input('Enter symbol of the geometry you want to see, "l" to see the list '
'of existing geometries or "q" to quit : ')
if cg_symbol == 'q':
break
if cg_symbol == 'l':
print(allcg.pretty_print(maxcn=13, additional_info={'nb_hints': True}))
continue
try:
cg = allcg[cg_symbol]
except LookupError:
print('Wrong geometry, try again ...')
continue
print(cg.name)
for ipoint, point in enumerate(cg.points):
print('Point #{:d} : {} {} {}'.format(ipoint, repr(point[0]), repr(point[1]), repr(point[2])))
print('Algorithms used :')
for ialgo, algo in enumerate(cg.algorithms):
print('Algorithm #{:d} :'.format(ialgo))
print(algo)
print('')
# Visualize the separation plane of a given algorithm
sepplane = False
if any([algo.algorithm_type == SEPARATION_PLANE for algo in cg.algorithms]):
test = input('Enter index of the algorithm for which you want to visualize the plane : ')
if test != '':
try:
ialgo = int(test)
algo = cg.algorithms[ialgo]
sepplane = True
except Exception:
                    print('Unable to determine the algorithm/separation_plane you want '
                          'to visualize for this geometry. Continuing without ...')
myfactor = 3.0
if vis is None:
vis = visualize(cg=cg, zoom=1.0, myfactor=myfactor)
else:
vis = visualize(cg=cg, vis=vis, myfactor=myfactor)
cg_points = [myfactor * np.array(pp) for pp in cg.points]
cg_central_site = myfactor * np.array(cg.central_site)
if sepplane:
pts = [cg_points[ii] for ii in algo.plane_points]
if algo.minimum_number_of_points == 2:
pts.append(cg_central_site)
centre = cg_central_site
else:
centre = np.sum(pts, axis=0) / len(pts)
factor = 1.5
target_dist = max([np.dot(pp - centre, pp - centre) for pp in cg_points])
current_dist = np.dot(pts[0] - centre, pts[0] - centre)
factor = factor * target_dist / current_dist
plane = Plane.from_npoints(points=pts)
p1 = centre + factor * (pts[0] - centre)
perp = factor * np.cross(pts[0] - centre, plane.normal_vector)
p2 = centre + perp
p3 = centre - factor * (pts[0] - centre)
p4 = centre - perp
vis.add_faces([[p1, p2, p3, p4]], [1.0, 0.0, 0.0], opacity=0.5)
target_radius = 0.25
radius = 1.5 * target_radius
if algo.minimum_number_of_points == 2:
vis.add_partial_sphere(coords=cg_central_site, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in pts:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
ps1 = [cg_points[ii] for ii in algo.point_groups[0]]
ps2 = [cg_points[ii] for ii in algo.point_groups[1]]
for pp in ps1:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 1.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in ps2:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 0.0, 1.0], start=0, end=360,
opacity=0.5)
vis.show()
|
tschaume/pymatgen
|
dev_scripts/chemenv/view_environment.py
|
Python
|
mit
| 5,049
|
[
"pymatgen"
] |
add249f4d999d13c673ae4aa190d1bd836f85ced3fe81f4fcaa68b589edc9c5a
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import e_field_gen as e_field
import odeint_solve as ode
import sys
ELEC_MASS = 9.10938356E-31
ELEC_CHARGE = -1.60217662E-19
FUND_FREQ = 3.7474057E14
SP_LIGHT = 3E8
PL_FWHM = 25E-15
FOCUS_RADIUS = 30E-6
PULSE_ENERGY = 0.6E-3
EPSILON_o = 8.85418782E-12
TIME_GRID = 200
INTENSITY = 1.88*(PULSE_ENERGY/(FOCUS_RADIUS**2*PL_FWHM))/np.pi #gaussian
FIELD_AMP = np.sqrt(2*INTENSITY/(EPSILON_o*SP_LIGHT))
FIELD_TOLERANCE = FIELD_AMP*1E-1
FIELD_AMP_ION = np.sqrt(2E14/(EPSILON_o*SP_LIGHT))
PONDER = (FIELD_AMP**2)*(ELEC_CHARGE**2)/(4*ELEC_MASS*(2*np.pi*FUND_FREQ)**2)
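# Approximate magnitudes implied by the constants above (illustrative
# back-of-the-envelope check, not used by the code):
#   INTENSITY ~ 1.6e19 W/m^2  (i.e. ~1.6e15 W/cm^2 at the focus)
#   FIELD_AMP ~ 1.1e11 V/m    (peak electric field)
#   PONDER    ~ 1.5e-17 J     (~95 eV ponderomotive energy at FUND_FREQ)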
def plot(*args, **kwargs):
# x = np.linspace(slider_12.val*PL_FWHM,(slider_12.val + slider_11.val)*PL_FWHM,200)
# z_field = args[1][1](x)
t2 = np.linspace(args[1], args[2], len(args[-1]))
# y = args[0][0]
# z = args[0][1]
dist = args[-1]
# closest = 1E-9*args[0][:,0]/PONDER
# mask = [0 if np.ma.is_masked(i) else np.NaN for i in np.ma.masked_invalid(closest)]
# time = args[0][:,1]
#kin_en = 1E-9*args[0][:,2]/np.max(args[0][:,2])
y_field = 1E-9*kwargs['field'][0](t2)/FIELD_AMP
ax1.clear()
# l = ax1.plot(time, closest, 'b')
# l2 = ax1.plot(time, mask, 'r.')
l = ax1.plot(t2, dist, 'b',t2, args[-2], 'r')
l1 = ax1.plot(t2,y_field, 'g')
#l2 = ax1.plot(time, kin_en, 'g')
ax1.set_ylim(-1E-9, 2E-9)
ax1.set_xlim(-5*PL_FWHM, -4*PL_FWHM)
fig1.canvas.draw_idle()
def update(val):
qwp_1 = slider_1.val
hwp_2 = slider_2.val
qwp_2 = slider_3.val
hwp_3 = slider_4.val
qwp_3 = slider_5.val
delay_1 = slider_6.val
delay_2 = slider_7.val
ampl_1 = slider_8.val
ampl_2 = slider_9.val
ampl_3 = slider_10.val
closeness = slider_13.val
a = e_field.e_field_gen(3, False, ampl_1*FIELD_AMP, ampl_2*FIELD_AMP,
ampl_3*FIELD_AMP, 0,
delay_1/FUND_FREQ, delay_2/FUND_FREQ,
FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
[[qwp_1], [hwp_2, qwp_2], [hwp_3, qwp_3]],
PL_FWHM, PL_FWHM, PL_FWHM,
b1='q', b2='hq', b3='hq')
t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
y_field = a[0](t)
z_field = a[1](t)
tot = np.sqrt(y_field**2 + z_field**2)
    times = t  # [j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
b = ode.solve_path(a, times[0], times[0] + 2/(FUND_FREQ), True, False, closeness*1E-9)
#print(b)
# plot(b,a)
plot(b[2:4], b[0], b[1], b[4], b[5], field=a)
# plot(a)
if __name__ == '__main__':
fig1 = plt.figure(1)
ax1 = plt.axes([0.05, 0.15, 0.9, 0.80])
#fig2 = plt.figure(2)
#ax2 = plt.axes([0.05, 0.15, 0.9, 0.80], projection='3d')
ax_slider_1 = plt.axes([0.1, 0.01, 0.2, 0.02])
ax_slider_2 = plt.axes([0.1, 0.04, 0.2, 0.02])
ax_slider_3 = plt.axes([0.1, 0.07, 0.2, 0.02])
ax_slider_4 = plt.axes([0.1, 0.1, 0.2, 0.02])
ax_slider_5 = plt.axes([0.1, 0.13, 0.2, 0.02])
ax_slider_6 = plt.axes([0.5, 0.01, 0.2, 0.02])
ax_slider_7 = plt.axes([0.5, 0.04, 0.2, 0.02])
ax_slider_8 = plt.axes([0.5, 0.07, 0.2, 0.02])
ax_slider_9 = plt.axes([0.5, 0.1, 0.2, 0.02])
ax_slider_10 = plt.axes([0.5, 0.13, 0.2, 0.02])
rax = plt.axes([0.0, 0.8, 0.1, 0.15])
ax_slider_11 = plt.axes([0.2, 0.97, 0.1, 0.02])
ax_slider_12 = plt.axes([0.6, 0.97, 0.1, 0.02])
ax_slider_13 = plt.axes([0.8, 0.97, 0.2, 0.02])
radio = widgets.RadioButtons(rax, ('CW', 'Pulsed'))
slider_1 = widgets.Slider(ax_slider_1, 'qwp_1', 0., 360)
slider_2 = widgets.Slider(ax_slider_2, 'hwp_2', 0., 360)
slider_3 = widgets.Slider(ax_slider_3, 'qwp_2', 0., 360)
slider_4 = widgets.Slider(ax_slider_4, 'hwp_3', 0., 360)
slider_5 = widgets.Slider(ax_slider_5, 'qwp_3', 0., 360)
slider_6 = widgets.Slider(ax_slider_6, 'delay_2', -2, 2)
slider_7 = widgets.Slider(ax_slider_7, 'delay_3', -2, 2)
slider_8 = widgets.Slider(ax_slider_8, 'ampl_1', 0, 1)
slider_9 = widgets.Slider(ax_slider_9, 'ampl_2', 0, 1)
slider_10 = widgets.Slider(ax_slider_10, 'ampl_3', 0, 1)
slider_11 = widgets.Slider(ax_slider_11, 'x-size', 0, 4)
slider_12 = widgets.Slider(ax_slider_12, 'x-start', -2, 2)
slider_13 = widgets.Slider(ax_slider_13, 'close', 0, 1)
#start
qwp_1 = slider_1.val
hwp_2 = slider_2.val
qwp_2 = slider_3.val
hwp_3 = slider_4.val
qwp_3 = slider_5.val
delay_1 = slider_6.val
delay_2 = slider_7.val
ampl_1 = slider_8.val
ampl_2 = slider_9.val
ampl_3 = slider_10.val
closeness = slider_13.val
a = e_field.e_field_gen(3, False, ampl_1*FIELD_AMP, 0*FIELD_AMP,
0*FIELD_AMP, 0,
delay_1/FUND_FREQ, delay_2/FUND_FREQ,
FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
[[45], [0, 0], [0, 0]],
PL_FWHM, PL_FWHM, PL_FWHM,
b1='q', b2='hq', b3='hq')
t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
y_field = a[0](t)
z_field = a[1](t)
tot = np.sqrt(y_field**2 + z_field**2)
    times = t  # [j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
b = ode.solve_path(a, times[0], times[0] + 2/(FUND_FREQ), True, False, closeness*1E-9)
#print(b)
# plot(b,a)
plot(b[2:4], b[0], b[1], b[4], b[5], field=a)
# plot(a)
#end
slider_1.on_changed(update)
slider_2.on_changed(update)
slider_3.on_changed(update)
slider_4.on_changed(update)
slider_5.on_changed(update)
slider_6.on_changed(update)
slider_7.on_changed(update)
slider_8.on_changed(update)
slider_9.on_changed(update)
slider_10.on_changed(update)
slider_11.on_changed(update)
slider_12.on_changed(update)
slider_13.on_changed(update)
radio.on_clicked(update)
plt.show()
|
KavuriG/classical-calc-three-color
|
tester.py
|
Python
|
gpl-3.0
| 6,151
|
[
"Gaussian"
] |
78792c508fd136aec9f37d1ead7535142562fd85516e9d46483f9da6d780c091
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
import ansible.constants as C
import ast
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
import json
except ImportError:
import simplejson as json
try:
from hashlib import md5 as _md5
except ImportError:
from md5 import md5 as _md5
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
    import builtins as builtin
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
os.makedirs(key_path, mode=0700)
os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
elif not os.path.isdir(key_path):
raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
fh = os.fdopen(fd, 'w')
fh.write(str(key))
fh.close()
return key
else:
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg)
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
indent = None
if format:
indent = 4
try:
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result2, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
fd = open(path, "w+")
fd.write(buf)
fd.close()
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
from ansible.utils import template
if conditional is None or conditional == '':
return True
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
return False
return True
if not isinstance(conditional, basestring):
return conditional
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and '-' not in str(inject[conditional]):
conditional = inject[conditional]
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = str(conditional).replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in conditional:
return True
elif "is defined" in conditional:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("'"):
given = given[1:-1]
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
if basedir is None:
basedir = "."
return os.path.abspath(os.path.join(basedir, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
''' find one file in a directory one level up in a dir named dirname relative to current '''
# (used by roles code)
from ansible.utils import template
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
role_spec = role_spec.strip()
role_version = ''
default_role_versions = dict(git='master', hg='tip')
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
def role_yaml_parse(role):
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = repo_url_to_role_name(role["src"])
return role
def json_loads(data):
''' parse a JSON string and return a data structure '''
return json.loads(data)
def _clean_data(orig_data, from_remote=False, from_inventory=False):
''' remove jinja2 template tags from a string '''
if not isinstance(orig_data, basestring):
return orig_data
# when the data is marked as having come from a remote, we always
# replace any print blocks (ie. {{var}}), however when marked as coming
# from inventory we only replace print blocks that contain a call to
    # a lookup plugin (ie. {{lookup('foo','bar')}})
replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
with contextlib.closing(StringIO.StringIO(orig_data)) as data:
# these variables keep track of opening block locations, as we only
# want to replace matched pairs of print/block tags
print_openings = []
block_openings = []
for mo in regex.finditer(orig_data):
token = mo.group(0)
token_start = mo.start(0)
if token[0] == '{':
if token == '{%':
block_openings.append(token_start)
elif token == '{{':
print_openings.append(token_start)
elif token[1] == '}':
prev_idx = None
if token == '%}' and block_openings:
prev_idx = block_openings.pop()
elif token == '}}' and print_openings:
prev_idx = print_openings.pop()
if prev_idx is not None:
# replace the opening
data.seek(prev_idx, os.SEEK_SET)
data.write('{#')
# replace the closing
data.seek(token_start, os.SEEK_SET)
data.write('#}')
else:
assert False, 'Unhandled regex match'
return data.getvalue()
def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
'''
walk a complex data structure, and use _clean_data() to
remove any template tags that may exist
'''
if not from_remote and not from_inventory:
        raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
if isinstance(orig_data, dict):
data = orig_data.copy()
for key in data:
new_key = _clean_data_struct(key, from_remote, from_inventory)
new_val = _clean_data_struct(data[key], from_remote, from_inventory)
if key != new_key:
del data[key]
data[new_key] = new_val
elif isinstance(orig_data, list):
data = orig_data[:]
for i in range(0, len(data)):
data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
elif isinstance(orig_data, basestring):
data = _clean_data(orig_data, from_remote, from_inventory)
else:
data = orig_data
return data
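# Illustrative example of the cleaning behaviour above (a hedged sketch, not
# part of the public API): matched jinja2 print tags coming from a remote are
# rewritten into comment tags so they can never be re-templated.
#
#     >>> _clean_data("{{ lookup('file', '/etc/passwd') }}", from_remote=True)
#     "{# lookup('file', '/etc/passwd') #}"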
def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
results = json.loads(data)
except:
if no_exceptions:
return dict(failed=True, parsed=False, msg=raw_data)
else:
raise
if from_remote:
results = _clean_data_struct(results, from_remote, from_inventory)
return results
def serialize_args(args):
'''
Flattens a dictionary args to a k=v string
'''
module_args = ""
for (k,v) in args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
elif isinstance(v, bool):
module_args = "%s=%s %s" % (k, str(v), module_args)
return module_args.strip()
def merge_module_args(current_args, new_args):
'''
merges either a dictionary or string of k=v pairs with another string of k=v pairs,
and returns a new k=v string without duplicates.
'''
if not isinstance(current_args, basestring):
raise errors.AnsibleError("expected current_args to be a basestring")
# we use parse_kv to split up the current args into a dictionary
final_args = parse_kv(current_args)
if isinstance(new_args, dict):
final_args.update(new_args)
elif isinstance(new_args, basestring):
new_args_kv = parse_kv(new_args)
final_args.update(new_args_kv)
return serialize_args(final_args)
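# Illustrative example (python dict ordering makes the exact key order of the
# returned string unspecified):
#
#     >>> merge_module_args("src=a.txt dest=/tmp/a", "dest=/tmp/b")
#     'src=a.txt dest=/tmp/b'   # keys from new_args win; order may vary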
def parse_yaml(data, path_hint=None):
''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
stripped_data = data.lstrip()
loaded = None
if stripped_data.startswith("{") or stripped_data.startswith("["):
# since the line starts with { or [ we can infer this is a JSON document.
try:
loaded = json.loads(data)
except ValueError, ve:
if path_hint:
raise errors.AnsibleError(path_hint + ": " + str(ve))
else:
raise errors.AnsibleError(str(ve))
else:
# else this is pretty sure to be a YAML document
loaded = yaml.safe_load(data)
return loaded
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if ":{{" in replaced and "}}" in replaced:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None, show_content=True):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if show_content:
if mark.line -1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# most likely displaying a file with sensitive content,
# so don't show any of the actual lines of yaml just the
# line number itself
msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
''' convert a yaml file to a data structure '''
data = None
show_content = True
try:
data = open(path).read()
except IOError:
raise errors.AnsibleError("file could not read: %s" % path)
vault = VaultLib(password=vault_password)
if vault.is_encrypted(data):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
if vault_password is None:
raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
data = vault.decrypt(data)
show_content = False
try:
return parse_yaml(data, path_hint=path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path, show_content)
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError, ve:
if 'no closing quotation' in str(ve).lower():
raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
for x in vargs:
if "=" in x:
k, v = x.split("=",1)
options[k.strip()] = unquote(v.strip())
return options
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
# we check here as well as in combine_vars() since this
# function can work recursively with nested dicts
_validate_both_dicts(a, b)
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
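# Illustrative example of the recursive merge above:
#
#     >>> merge_hash({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
#     {'a': {'x': 1, 'y': 2}, 'b': 2}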
def md5s(data):
''' Return MD5 hex digest of data. '''
digest = _md5()
try:
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
return digest.hexdigest()
def md5(filename):
''' Return MD5 hex digest of local file, None if file is not present or a directory. '''
if not os.path.exists(filename) or os.path.isdir(filename):
return None
digest = _md5()
blocksize = 64 * 1024
try:
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest.update(block)
block = infile.read(blocksize)
infile.close()
except IOError, e:
raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
def version_info(gitinfo=False):
if gitinfo:
        # expensive call, use with care
ansible_version_string = version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def sanitize_output(str):
''' strips private info out of a string '''
private_keys = ['password', 'login_password']
filter_re = [
# filter out things like user:pass@foo/whatever
# and http://username:pass@wherever/foo
        re.compile(r'^(?P<before>.*:)(?P<password>.*)(?P<after>\@.*)$'),
]
parts = str.split()
output = ''
for part in parts:
try:
(k,v) = part.split('=', 1)
if k in private_keys:
output += " %s=VALUE_HIDDEN" % k
else:
found = False
for filter in filter_re:
m = filter.match(v)
if m:
d = m.groupdict()
output += " %s=%s" % (k, d['before'] + "********" + d['after'])
found = True
break
if not found:
output += " %s" % part
except:
output += " %s" % part
return output.strip()
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password')
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
help='ask for su password')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true",
dest='sudo', help="run operations with sudo (nopasswd)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
dest='remote_user', help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
parser.add_option('-S', '--su', default=constants.DEFAULT_SU,
action='store_true', help='run operations with su')
parser.add_option('-R', '--su-user', help='run operations with su as this '
'user (default=%s)' % constants.DEFAULT_SU_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
vault_pass = None
new_vault_pass = None
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = vault_pass.strip()
if new_vault_pass:
new_vault_pass = new_vault_pass.strip()
return vault_pass, new_vault_pass
def ask_passwords(ask_pass=False, ask_sudo_pass=False, ask_su_pass=False, ask_vault_pass=False):
sshpass = None
sudopass = None
su_pass = None
vault_pass = None
sudo_prompt = "sudo password: "
su_prompt = "su password: "
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
sudo_prompt = "sudo password [defaults to SSH password]: "
if ask_sudo_pass:
sudopass = getpass.getpass(prompt=sudo_prompt)
if ask_pass and sudopass == '':
sudopass = sshpass
if ask_su_pass:
su_pass = getpass.getpass(prompt=su_prompt)
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
return (sshpass, sudopass, su_pass, vault_pass)
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter any leading line which does not start with '{' or '['.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
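# Illustrative examples: anything outside the true-ish set coerces to False.
#
#     >>> boolean("yes"), boolean(1), boolean("True")
#     (True, True, True)
#     >>> boolean("no"), boolean(0), boolean(None)
#     (False, False, False)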
def make_sudo_cmd(sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required.
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
prompt = '[sudo via ansible, key=%s] password: ' % randbits
success_key = 'SUDO-SUCCESS-%s' % randbits
sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS,
prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)))
return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
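# Illustrative shape of the returned triple ('<random>' below is a stand-in
# for the random 32-character key generated at call time):
#
#     >>> cmd, prompt, key = make_sudo_cmd('deploy', None, 'whoami')
#     >>> prompt
#     '[sudo via ansible, key=<random>] password: '
#     >>> key
#     'SUDO-SUCCESS-<random>'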
def make_su_cmd(su_user, executable, cmd):
"""
Helper function for connection plugins to create direct su commands
"""
# TODO: work on this function
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
success_key = 'SUDO-SUCCESS-%s' % randbits
sudocmd = '%s %s %s -c "%s -c %s"' % (
C.DEFAULT_SU_EXE, C.DEFAULT_SU_FLAGS, su_user, executable or '$SHELL',
pipes.quote('echo %s; %s' % (success_key, cmd))
)
return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key)
_TO_UNICODE_TYPES = (unicode, type(None))
def to_unicode(value):
if isinstance(value, _TO_UNICODE_TYPES):
return value
return value.decode("utf-8")
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def list_union(a, b):
result = []
for x in a:
if x not in result:
result.append(x)
for x in b:
if x not in result:
result.append(x)
return result
def list_intersection(a, b):
result = []
for x in a:
if x in b and x not in result:
result.append(x)
return result
def list_difference(a, b):
result = []
for x in a:
if x not in b and x not in result:
result.append(x)
for x in b:
if x not in a and x not in result:
result.append(x)
return result
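# Note: despite its name this is a *symmetric* difference -- elements that
# appear in exactly one of the two lists. Illustrative example:
#
#     >>> list_difference([1, 2, 3], [2, 3, 4])
#     [1, 4]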
def contains_vars(data):
'''
returns True if the data contains a variable pattern
'''
return "$" in data or "{{" in data
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
    if not sys.version.startswith('2.6'):
        # set.union() returns a new set; rebind it so ast.Set is actually added
        SAFE_NODES = SAFE_NODES.union(
            set(
                (ast.Set,)
            )
        )
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError, e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception, e:
if include_exceptions:
return (expr, e)
return expr
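# A minimal sketch of the intended use (assuming no custom filter plugins are
# installed): plain literals evaluate, while anything outside the AST whitelist
# is rejected and the original expression string is handed back unchanged:
#
#     >>> safe_eval("[1, 2, 3]")
#     [1, 2, 3]
#     >>> safe_eval("__import__('os').system('true')")   # Attribute node not whitelisted
#     "__import__('os').system('true')"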
def listify_lookup_plugin_terms(terms, basedir, inject):
from ansible.utils import template
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and \
not stripped.startswith("/") and \
not stripped.startswith('set([') and \
not LOOKUP_REGEX.search(terms):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and "{{" in new_terms:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
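# A hedged sketch of the intended behaviour (names are hypothetical): a bare
# variable name is wrapped in '{{ }}', templated, and -- if it renders to a
# list/dict-looking string -- evaluated back into a real structure via
# safe_eval; a plain string that templates to nothing becomes a one-item list:
#
#     with_items: my_list  ->  ['a', 'b']    (given inject = {'my_list': ['a', 'b']})
#     with_items: foo      ->  ['foo']       (when 'foo' is not defined)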
def combine_vars(a, b):
_validate_both_dicts(a, b)
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
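# Illustrative example: with the default 'replace' hash behaviour the second
# dict wins on duplicate keys (with 'merge', nested dicts are merged instead):
#
#     >>> combine_vars({'a': 1, 'b': 1}, {'b': 2})
#     {'a': 1, 'b': 2}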
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
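# Implementation note: this is rejection sampling -- single bytes are drawn
# from os.urandom() and kept only if they fall inside `chars`, which keeps the
# distribution over the allowed characters uniform (at the cost of discarding
# most draws).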
def before_comment(msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg
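# Illustrative examples: escaped hashes survive, unescaped ones start a comment:
#
#     >>> before_comment('foo #bar')
#     'foo '
#     >>> before_comment('foo \\#bar')
#     'foo #bar'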
def load_vars(basepath, results, vault_password=None):
"""
Load variables from any potential yaml filename combinations of basepath,
returning result.
"""
paths_to_check = [ "".join([basepath, ext])
for ext in C.YAML_FILENAME_EXTENSIONS ]
found_paths = []
for path in paths_to_check:
found, results = _load_vars_from_path(path, results, vault_password=vault_password)
if found:
found_paths.append(path)
    # disallow the potentially confusing situation where multiple variable
    # files exist for the same name, e.g. if both group_vars/all.yml and
    # group_vars/all.yaml are present.
if len(found_paths) > 1:
raise errors.AnsibleError("Multiple variable files found. "
"There should only be one. %s" % ( found_paths, ))
return results
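# Illustrative example (hypothetical paths): for basepath 'group_vars/all',
# one candidate path per extension in C.YAML_FILENAME_EXTENSIONS is probed
# (typically '', '.yml', '.yaml', '.json'), and an error is raised if more
# than one candidate exists on disk.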
## load variables from yaml files/dirs
# e.g. host/group_vars
#
def _load_vars_from_path(path, results, vault_password=None):
"""
Robustly access the file at path and load variables, carefully reporting
errors in a friendly/informative way.
Return the tuple (found, new_results, )
"""
try:
# in the case of a symbolic link, we want the stat of the link itself,
# not its target
pathstat = os.lstat(path)
except os.error, err:
# most common case is that nothing exists at that path.
if err.errno == errno.ENOENT:
return False, results
# otherwise this is a condition we should report to the user
raise errors.AnsibleError(
"%s is not accessible: %s."
" Please check its permissions." % ( path, err.strerror))
# symbolic link
if stat.S_ISLNK(pathstat.st_mode):
try:
target = os.path.realpath(path)
except os.error, err2:
raise errors.AnsibleError("The symbolic link at %s "
"is not readable: %s. Please check its permissions."
% (path, err2.strerror, ))
# follow symbolic link chains by recursing, so we repeat the same
# permissions checks above and provide useful errors.
return _load_vars_from_path(target, results, vault_password)
# directory
if stat.S_ISDIR(pathstat.st_mode):
# support organizing variables across multiple files in a directory
return True, _load_vars_from_folder(path, results, vault_password=vault_password)
# regular file
elif stat.S_ISREG(pathstat.st_mode):
data = parse_yaml_from_file(path, vault_password=vault_password)
if data and type(data) != dict:
raise errors.AnsibleError(
"%s must be stored as a dictionary/hash" % path)
elif data is None:
data = {}
# combine vars overrides by default but can be configured to do a
# hash merge in settings
results = combine_vars(results, data)
return True, results
# something else? could be a fifo, socket, device, etc.
else:
raise errors.AnsibleError("Expected a variable file or directory "
"but found a non-file object at path %s" % (path, ))
def _load_vars_from_folder(folder_path, results, vault_password=None):
"""
Load all variables within a folder recursively.
"""
# this function and _load_vars_from_path are mutually recursive
try:
names = os.listdir(folder_path)
except os.error, err:
raise errors.AnsibleError(
"This folder cannot be listed: %s: %s."
% ( folder_path, err.strerror))
# evaluate files in a stable order rather than whatever order the
# filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
for path in paths:
_found, results = _load_vars_from_path(path, results, vault_password=vault_password)
return results
def update_hash(hash, key, new_value):
''' used to avoid nested .update calls on the parent '''
value = hash.get(key, {})
value.update(new_value)
hash[key] = value
def censor_unlogged_data(data):
'''
    used when the no_log: True attribute is passed to a task to keep data from a callback.
    NOT intended to prevent variable registration, only to keep results from showing up
    on screen
'''
new_data = {}
for (x,y) in data.iteritems():
if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
new_data[x] = y
new_data['censored'] = 'results hidden due to no_log parameter'
return new_data
|
qvicksilver/ansible
|
lib/ansible/utils/__init__.py
|
Python
|
gpl-3.0
| 55,670
|
[
"VisIt"
] |
e8788d3534ca4bd101bb362705b559d8796f06c777aa5710282c0d08e60d5390
|
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import os
import sys
import tarfile
import tempfile
import urllib
__NCBITAXONOMY_URL = 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz'
def load(writer, array_delimiter, source=__NCBITAXONOMY_URL):
'''Loads NCBI Taxonomy data.'''
nodes_filename, names_filename = _get_ncbi_taxonomy_files(source)
nodes, rels = _parse_nodes(nodes_filename, array_delimiter)
_parse_names(nodes, names_filename, array_delimiter)
writer.write_nodes(nodes.values(), 'Organism')
writer.write_rels(rels, 'Organism', 'Organism')
def _get_ncbi_taxonomy_files(source):
'''Downloads and extracts NCBI Taxonomy files.'''
temp_dir = tempfile.gettempdir()
temp_gzipfile = tempfile.NamedTemporaryFile()
urllib.urlretrieve(source, temp_gzipfile.name)
temp_tarfile = tarfile.open(temp_gzipfile.name, 'r:gz')
temp_tarfile.extractall(temp_dir)
temp_gzipfile.close()
temp_tarfile.close()
return os.path.join(temp_dir, 'nodes.dmp'), \
os.path.join(temp_dir, 'names.dmp')
def _parse_nodes(filename, array_delimiter):
'''Parses nodes file.'''
nodes = {}
rels = []
with open(filename, 'r') as textfile:
for line in textfile:
tokens = [x.strip() for x in line.split('|')]
tax_id = tokens[0]
if tax_id != '1':
rels.append([tax_id, 'is_a', tokens[1]])
nodes[tax_id] = {'taxonomy:ID(Organism)': tax_id,
':LABEL':
'Organism' + array_delimiter + tokens[2]}
return nodes, rels
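# Illustrative example of a nodes.dmp record (pipe-delimited, whitespace
# trimmed): a line like '9606 | 9605 | species | ...' yields the relation
# ['9606', 'is_a', '9605'] and a node labelled 'Organism' + array_delimiter
# + 'species'; the root (tax_id '1') produces no 'is_a' relation.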
def _parse_names(nodes, filename, array_delimiter):
'''Parses names file.'''
with open(filename, 'r') as textfile:
for line in textfile:
tokens = [x.strip() for x in line.split('|')]
node = nodes[tokens[0]]
if 'name' not in node:
node['name'] = tokens[1]
node['names:string[]'] = set([node['name']])
else:
node['names:string[]'].add(tokens[1])
for _, node in nodes.iteritems():
if 'names:string[]' in node:
node['names:string[]'] = \
array_delimiter.join(node['names:string[]'])
def main(argv):
'''main method'''
load(*argv)
if __name__ == "__main__":
main(sys.argv[1:])
|
synbiochem/biochem4j
|
sbcdb/ncbi_taxonomy_utils.py
|
Python
|
mit
| 2,520
|
[
"VisIt"
] |
28d74ba2284e11d035946c89d0d5714972a26b5c6308b5eb0623aa80dfec8294
|
from keras.models import load_model
from os import listdir
from os.path import join
import cv2
import numpy as np
import matplotlib.pyplot as plt
import settings
from scipy.misc import imresize
from skimage.segmentation import active_contour
from skimage.filters import gaussian
def read(path, size=settings.INPUT_SHAPE):
im = cv2.imread(path)
if im.shape != size:
im = imresize(im, size[:2])
return im
def segment(im, threshold=0.2):
def morph_op(im, e):
im = cv2.morphologyEx(im, cv2.MORPH_OPEN, e)
im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, e)
return im
def segment_gray(im, only_max=False):
e = np.ones((7, 7))
new_im = np.ones(im.shape) * 0.5
im_high = im.copy()
im_high[im_high < 1 - threshold] = 0
im_high[im_high != 0] = 1
im_high = morph_op(im_high, e)
new_im[im_high == 1] = 1
if not only_max:
im_low = im.copy()
im_low[im_low > threshold] = 0
im_low[im_low != 0] = 1
im_low = morph_op(im_low, e)
new_im[im_low == 1] = 0
else:
new_im[new_im == 0.5] = 0
return new_im
    def segment_color(im):
        for i in range(im.shape[-1]):
            # segment_gray returns an (H, W, 1) array; drop the channel axis
            # so it fits back into the (H, W) slice of the image
            im[:, :, i] = segment_gray(im[:, :, [i]], only_max=True)[:, :, 0]
        return im
if im.shape[-1] == 1:
im = segment_gray(im)
else:
im = segment_color(im)
return im
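# A note on the thresholding above: predictions within `threshold` of 1 are
# mapped to 1 (bright), nonzero values at or below `threshold` are mapped to
# 0 (dark), and everything else stays at 0.5 (undecided); the morphological
# open/close pass removes small speckles before the masks are merged.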
def normalize(im):
return im / 255
def draw_contours(im, background):
def get_contours(im):
        im_low = np.ones(im.shape, dtype=np.uint8)
        im_low[im < 0.5] = 0
        _, c_dark, _ = cv2.findContours(im_low, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        im_high = np.zeros(im.shape, dtype=np.uint8)
        im_high[im > 0.5] = 1
        _, c_bright, _ = cv2.findContours(im_high, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return c_dark, c_bright
def draw_contours_gray(im, background):
c_dark, c_bright = get_contours(im)
cv2.drawContours(background, c_dark, -1, [0], 2)
cv2.drawContours(background, c_bright, -1, [0,0,255], 2)
return background
if im.shape[-1] == 1:
im = draw_contours_gray(im, background)
return im
def joined_listdir(folder):
return [join(folder, file) for file in listdir(folder)]
def imshow(im):
if im.shape[-1] == 3:
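        # cv2 reads images as BGR; reversing the channel axis with [:, :, ::-1]
        # converts to RGB so matplotlib displays the colours correctly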
plt.imshow(im[:, :, ::-1], vmin=0, vmax=1)
else:
plt.imshow(im[:, :, 0], vmin=0, vmax=1, cmap='hot')
files = [joined_listdir(join(settings.DATASET, 'val', folder)) for folder in settings.TAGS]
n = len(files)
model = load_model(join('models', 'saliency.h5'))
while True:
plt.figure(figsize=(10, 14))
files_i = [np.random.randint(0, len(files[i]), size=1)[0] for i in range(n)]
sample_files = [file_t[file_i] for (file_t, file_i) in zip(files, files_i)]
sample_files = [join('..', 'imgs', 'eyes', 'val', 'closed_eyes', '3304859 0.png'),
join('..', 'imgs', 'eyes', 'val', 'looking_at_viewer', '3578131 0.png'),
join('..', 'imgs', 'eyes', 'val', 'looking_at_viewer', '3578918 0.png'),
join('..', 'imgs', 'eyes', 'val', 'looking_at_viewer', '3582898 0.png'),
]
n = 4
ims = np.array(normalize(np.array([read(file) for file in sample_files])))
res = model.predict(ims.copy())
segmentation = [segment(im.copy()) for im in res]
contours = [draw_contours(s, im.copy()) for s, im in zip(segmentation, ims)]
col = 3
for j in range(n):
plt.subplot(n, col, j * col + 1)
imshow(ims[j])
if j == 0:
plt.title('Original')
plt.subplot(n, col, j * col + 2)
imshow(res[j])
if j == 0:
plt.title('Network output')
plt.subplot(n, col, j * col + 3)
imshow(contours[j])
if j == 0:
plt.title('Contours')
plt.tight_layout()
plt.show()
|
Rignak/Scripts-Python
|
DeepLearning/AutoEncoder/Saliency_test.py
|
Python
|
gpl-3.0
| 3,955
|
[
"Gaussian"
] |
c62cef23bc005311ece0403bd5155c94b48b9556cd5e934463148259b9d2246a
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, date
from lxml import etree
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'sequence': fields.integer('Sequence'),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'legend_priority': fields.char(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'legend_blocked': fields.char(
'Kanban Blocked Explanation', translate=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.'),
'legend_done': fields.char(
'Kanban Valid Explanation', translate=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.'),
'legend_normal': fields.char(
'Kanban Ongoing Explanation', translate=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.'),
'fold': fields.boolean('Folded in Tasks Pipeline',
help='This stage is folded in the kanban view when '
'there are no records in that stage to display.'),
}
def _get_default_project_ids(self, cr, uid, ctx=None):
if ctx is None:
ctx = {}
default_project_id = ctx.get('default_project_id')
return [default_project_id] if default_project_id else None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
_period_number = 5
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res={}
for project in self.browse(cr, uid, ids, context=context):
res[project.id] = len(project.task_ids)
return res
def _task_needaction_count(self, cr, uid, ids, field_name, arg, context=None):
Task = self.pool['project.task']
res = dict.fromkeys(ids, 0)
projects = Task.read_group(cr, uid, [('project_id', 'in', ids), ('message_needaction', '=', True)], ['project_id'], ['project_id'], context=context)
res.update({project['project_id'][0]: int(project['project_id_count']) for project in projects})
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('portal', _('Customer Project: visible in portal if the customer is a follower')),
('employees', _('All Employees Project: all employees can access')),
('followers', _('Private Project: followers only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'help': _('''<p class="oe_view_nocontent_create">
Documents are attached to the tasks and issues of your project.</p><p>
Send messages or log internal notes with attachments to link
documents to your project.
</p>'''),
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'label_tasks': fields.char('Use Tasks as', help="Gives label to tasks on project's kanban view."),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_needaction_count': fields.function(_task_needaction_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]),
'color': fields.integer('Color Index'),
'user_id': fields.many2one('res.users', 'Project Manager'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized "
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Portal : employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
}
_order = "sequence, name, id"
_defaults = {
'active': True,
'type': 'contract',
'label_tasks': 'Tasks',
'state': 'open',
'sequence': 10,
'user_id': lambda self,cr,uid,ctx: uid,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
    # TODO: why not use a SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
(_check_dates, 'Error! project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
for follower in proj.message_follower_ids:
self.message_subscribe(cr, uid, res, partner_ids=[follower.partner_id.id], subtype_ids=[subtype.id for subtype in follower.subtype_ids])
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end}, context=context)
result.append(new_id)
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
}
@api.multi
def setActive(self, value=True):
""" Set a project as active/inactive, and its tasks as well. """
self.write({'active': value})
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name,
mail_create_nosubscribe=True)
ir_values = self.pool.get('ir.values').get_default(cr, uid, 'project.config.settings', 'generate_project_alias')
if ir_values:
vals['alias_name'] = vals.get('alias_name') or vals.get('name')
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
values = {'alias_parent_thread_id': project_id, 'alias_defaults': {'project_id': project_id}}
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], values, context=context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
res = super(project, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
projects = self.browse(cr, uid, ids, context)
tasks = projects.with_context(active_test=False).mapped('tasks')
tasks.write({'active': vals['active']})
return res
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
def _get_default_partner(self, cr, uid, context=None):
if context is None:
context = {}
if 'default_project_id' in context:
project = self.pool.get('project.project').browse(cr, uid, context['default_project_id'], context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
if context is None:
context = {}
return self.stage_find(cr, uid, [], context.get('default_project_id'), [('fold', '=', False)], context=context)
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
if context is None:
context = {}
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
if 'default_project_id' in context:
search_domain = ['|', ('project_ids', '=', context['default_project_id']), ('id', 'in', ids)]
else:
search_domain = [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6,0,list(new_parent_ids))],
'child_ids': [(6,0,list(new_child_ids))]})
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
current = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default['name'] = _("%s (copy)") % current.name
if 'remaining_hours' not in default:
default['remaining_hours'] = current.planned_hours
return super(task, self).copy_data(cr, uid, id, default, context)
_columns = {
'active': fields.boolean('Active'),
'name': fields.char('Task Title', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Normal'), ('1','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'tag_ids': fields.many2many('project.tags', string='Tags', oldname='categ_ids'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=True, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_assign': fields.datetime('Assigning Date', select=True, copy=False, readonly=True),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False, readonly=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'manager_id': fields.related('project_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'attachment_ids': fields.one2many('ir.attachment', 'res_id', domain=lambda self: [('res_model', '=', self._name)], auto_join=True, string='Attachments'),
        # In the domain of displayed_image_id we couldn't use attachment_ids, because a one2many is represented as a list of commands, so we used res_model & res_id
'displayed_image_id': fields.many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Displayed Image'),
'legend_blocked': fields.related("stage_id", "legend_blocked", type="char", string='Kanban Blocked Explanation'),
'legend_done': fields.related("stage_id", "legend_done", type="char", string='Kanban Valid Explanation'),
'legend_normal': fields.related("stage_id", "legend_normal", type="char", string='Kanban Ongoing Explanation'),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': lambda self, cr, uid, ctx=None: ctx.get('default_project_id') if ctx is not None else False,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
        if id in visited_branch: # cycle detected
            return False
        if id in visited_node: # already tested; don't redo the work for nothing
            return True
        visited_branch.add(id)
        visited_node.add(id)
        # visit children using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
    def _check_dates(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        obj_task = self.browse(cr, uid, ids[0], context=context)
        start = obj_task.date_start or False
        end = obj_task.date_end or False
        if start and end:
            if start > end:
                return False
        return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task starting date must be lower than its ending date.', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
        # read uom as admin to avoid access rights issues, e.g. for portal/share users,
        # this should be safe (no context passed to avoid side-effects)
        obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
        res = super(task, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
try:
# using get_object to get translation value
uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', 'product_uom_hour', context=context)
except ValueError:
uom_hour = False
if not obj_tm or not uom_hour or obj_tm.id == uom_hour.id:
return res
eview = etree.fromstring(res['arch'])
# if the project_time_mode_id is not in hours (so in days), display it as a float field
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
# replace reference of 'Hours' to 'Day(s)'
for f in res['fields']:
            # TODO: this does NOT work in languages other than English;
            # the field 'Initially Planned Hours' should be replaced by 'Initially Planned Days',
            # but the string 'Initially Planned Days' is not available in translation
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours', obj_tm.name)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
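    # Illustrative domain construction (hypothetical ids): for section_ids
    # [1, 2] the search domain built above is
    #     ['|', ('project_ids', '=', 1), ('project_ids', '=', 2)]
    # i.e. len(section_ids) - 1 leading '|' operators in prefix notation,
    # plus whatever extra domain the caller passed in.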
def _check_child_task(self, cr, uid, ids, context=None):
        if context is None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise UserError(_("Child task still open.\nPlease cancel or complete child task first."))
return True
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
if vals and not 'kanban_state' in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _get_total_hours(self):
return self.remaining_hours
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
company = self.pool["res.users"].browse(cr, uid, uid, context=context).company_id
duration_uom = {
'day(s)': 'd', 'days': 'd', 'day': 'd', 'd': 'd',
            'month(s)': 'm', 'months': 'm', 'month': 'm', 'm': 'm',
'week(s)': 'w', 'weeks': 'w', 'week': 'w', 'w': 'w',
'hour(s)': 'H', 'hours': 'H', 'hour': 'H', 'h': 'H',
}.get(company.project_time_mode_id.name.lower(), "hour(s)")
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2f%s\"
%s effort = \"%.2f%s\"''' % (ident, task.id, ident, task.remaining_hours, duration_uom, ident, task._get_total_hours(), duration_uom)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'kanban_state' in init_values and record.kanban_state == 'blocked':
return 'project.mt_task_blocked'
elif 'kanban_state' in init_values and record.kanban_state == 'done':
return 'project.mt_task_ready'
elif 'user_id' in init_values and record.user_id: # assigned -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values and record.stage_id and record.stage_id.sequence <= 1: # start stage -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values:
return 'project.mt_task_stage'
return super(task, self)._track_subtype(cr, uid, ids, init_values, context=context)
def _notification_group_recipients(self, cr, uid, ids, message, recipients, done_ids, group_data, context=None):
""" Override the mail.thread method to handle project users and officers
recipients. Indeed those will have specific action in their notification
emails: creating tasks, assigning it. """
group_project_user = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.group_project_user')
for recipient in recipients:
if recipient.id in done_ids:
continue
if recipient.user_ids and group_project_user in recipient.user_ids[0].groups_id.ids:
group_data['group_project_user'] |= recipient
done_ids.add(recipient.id)
return super(task, self)._notification_group_recipients(cr, uid, ids, message, recipients, done_ids, group_data, context=context)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(task, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
take_action = self._notification_link_helper(cr, uid, ids, 'assign', context=context)
new_action_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.action_view_task')
new_action = self._notification_link_helper(cr, uid, ids, 'new', context=context, action_id=new_action_id)
task_record = self.browse(cr, uid, ids[0], context=context)
actions = []
if not task_record.user_id:
actions.append({'url': take_action, 'title': _('I take it')})
else:
actions.append({'url': new_action, 'title': _('New Task')})
res['group_project_user'] = {
'actions': actions
}
return res
@api.cr_uid_context
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), default=default, context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def email_split(self, cr, uid, ids, msg, context=None):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
task_ids = self.browse(cr, uid, ids, context=context)
aliases = [task.project_id.alias_name for task in task_ids if task.project_id]
return filter(lambda x: x.split('@')[0] not in aliases, email_list)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
'partner_id': msg.get('author_id', False)
}
defaults.update(custom_values)
res = super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
email_list = self.email_split(cr, uid, [res], msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, [res], email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, [res], partner_ids, context=context)
return res
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
email_list = self.email_split(cr, uid, ids, msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, ids, email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(task, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for data in self.browse(cr, uid, ids, context=context):
if data.partner_id:
reason = _('Customer Email') if data.partner_id.email else _('Customer')
data._message_add_suggested_recipient(recipients, partner=data.partner_id, reason=reason)
return recipients
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
def _compute_project_count(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = len(account.project_ids)
return result
_columns = {
'use_tasks': fields.boolean('Tasks', help="Check this box to manage internal activities through this project"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', string="Company UOM", type='many2one', relation='product.uom'),
'project_ids': fields.one2many('project.project', 'analytic_account_id', 'Projects'),
'project_count': fields.function(_compute_project_count, 'Project Count', type='integer')
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
        '''
        Decide whether a project needs to be created automatically when an
        analytic account is created. Returns True if it does, False otherwise.
        '''
if context is None: context = {}
return vals.get('use_tasks') and not 'project_creation_in_progress' in context
@api.cr_uid_id_context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
        '''
        Called at analytic account creation time; creates a project
        automatically linked to the account if the conditions are met.
        '''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'use_tasks': True,
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_tasks = self.pool['project.task'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_tasks:
raise UserError(_('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
def projects_action(self, cr, uid, ids, context=None):
accounts = self.browse(cr, uid, ids, context=context)
project_ids = sum([account.project_ids.ids for account in accounts], [])
result = {
"type": "ir.actions.act_window",
"res_model": "project.project",
"views": [[False, "tree"], [False, "form"]],
"domain": [["id", "in", project_ids]],
"context": {"create": False},
"name": "Projects",
}
if len(project_ids) == 1:
result['views'] = [(False, "form")]
result['res_id'] = project_ids[0]
else:
result = {'type': 'ir.actions.act_window_close'}
return result
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_tags(osv.Model):
""" Tags of project's tasks (or issues) """
_name = "project.tags"
_description = "Tags of project's tasks, issues..."
_columns = {
'name': fields.char('Name', required=True),
'color': fields.integer('Color Index'),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
|
AyoubZahid/odoo
|
addons/project/project.py
|
Python
|
gpl-3.0
| 53,608
|
[
"VisIt"
] |
36e1211b8b78600e1eea54d66e5bccd3bea6502d4a024226271146749b54d210
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Cloud Console_.
.. _Console: https://cloud.google.com/console
Setting up Service Account authentication (note that you need the PyCrypto
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically). The key you download
is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
It needs to be converted to the PEM format. Please note, the PKCS12 format
is deprecated and may be removed in a future release.
- Convert the key using OpenSSL (the default password is 'notasecret'):
``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts
-passin pass:notasecret | openssl rsa -out PRIV.pem``
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, the libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json
import base64
import errno
import time
import datetime
import logging
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import Crypto.Random
Crypto.Random.atfork()
except ImportError:
# The pycrypto library is unavailable
SHA256 = None
RSA = None
PKCS1_v1_5 = None
UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def _utcnow():
"""
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
return datetime.datetime.utcnow()
def _utc_timestamp(datetime_obj):
return datetime_obj.strftime(UTC_TIMESTAMP_FORMAT)
def _from_utc_timestamp(timestamp):
return datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)
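# Usage sketch (added, not in the original module): the two helpers above are
# inverses for whole-second datetimes, which is what the token-expiry
# bookkeeping below relies on.
#
#     >>> ts = _utc_timestamp(datetime.datetime(2015, 1, 19, 12, 0, 0))
#     >>> ts
#     '2015-01-19T12:00:00Z'
#     >>> _from_utc_timestamp(ts)
#     datetime.datetime(2015, 1, 19, 12, 0)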
def _get_gce_metadata(path=''):
try:
url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
headers = {'Metadata-Flavor': 'Google'}
response = get_response_object(url, headers=headers)
return response.status, '', response.body
except Exception as e:
return -1, str(e), None
class GoogleAuthError(LibcloudError):
"""Generic Error class for various authentication errors."""
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
class GoogleBaseError(ProviderError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
pass
class JsonParseError(GoogleBaseError):
pass
class ResourceNotFoundError(GoogleBaseError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
if isinstance(value, dict) and 'message' in value and \
value['message'].count('/') == 1 and \
value['message'].count('projects/') == 1:
value['message'] = value['message'] + ". A missing project " \
"error may be an authentication issue. " \
"Please ensure your auth credentials match " \
"your project. "
super(GoogleBaseError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
pass
class ResourceExistsError(GoogleBaseError):
pass
class ResourceInUseError(GoogleBaseError):
pass
class GoogleResponse(JsonResponse):
"""
Google Base Response class.
"""
def success(self):
"""
Determine if the request was successful.
For the Google response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C{True}
"""
return True
def _get_error(self, body):
"""
Get the error code and message from a JSON response.
Return just the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: Tuple containing error code and message
:rtype: ``tuple`` of ``str`` or ``int``
"""
if 'errors' in body['error']:
err = body['error']['errors'][0]
else:
err = body['error']
if 'code' in err:
code = err.get('code')
message = err.get('message')
else:
code = err.get('reason', None)
message = body.get('error_description', err)
return (code, message)
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
json_error = False
try:
body = json.loads(self.body)
        except Exception:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
valid_http_codes = [
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.CONFLICT,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status, None)
elif 'error' in body:
(code, message) = self._get_error(body)
if code == 'QUOTA_EXCEEDED':
raise QuotaExceededError(message, self.status, code)
elif code == 'RESOURCE_ALREADY_EXISTS':
raise ResourceExistsError(message, self.status, code)
elif code == 'alreadyExists':
raise ResourceExistsError(message, self.status, code)
elif code.startswith('RESOURCE_IN_USE'):
raise ResourceInUseError(message, self.status, code)
else:
raise GoogleBaseError(message, self.status, code)
else:
return body
elif self.status == httplib.NOT_FOUND:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise ResourceNotFoundError(message, self.status, code)
elif self.status == httplib.BAD_REQUEST:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise InvalidRequestError(message, self.status, code)
else:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(object):
name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
"""
Base class for Google Authentication. Should be subclassed for specific
types of authentication.
"""
driver = GoogleBaseDriver
responseCls = GoogleResponse
name = 'Google Auth'
host = 'accounts.google.com'
auth_path = '/o/oauth2/auth'
def __init__(self, user_id, key=None, scopes=None,
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
login_hint=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:param scopes: A list of urls defining the scope of authentication
to grant.
:type scopes: ``list``
:keyword redirect_uri: The Redirect URI for the authentication
request. See Google OAUTH2 documentation for
more info.
:type redirect_uri: ``str``
:keyword login_hint: Login hint for authentication request. Useful
for Installed Application authentication.
:type login_hint: ``str``
"""
scopes = scopes or []
self.scopes = " ".join(scopes)
self.redirect_uri = redirect_uri
self.login_hint = login_hint
super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)
def add_default_headers(self, headers):
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['Host'] = self.host
return headers
def _token_request(self, request_body):
"""
Return an updated token from a token request body.
:param request_body: A dictionary of values to send in the body of the
token request.
:type request_body: ``dict``
:return: A dictionary with updated token information
:rtype: ``dict``
"""
data = urlencode(request_body)
try:
response = self.request('/o/oauth2/token', method='POST',
data=data)
except AttributeError:
raise GoogleAuthError('Invalid authorization response, please '
'check your credentials and time drift.')
token_info = response.object
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
def refresh_token(self, token_info):
"""
Refresh the current token.
Fetch an updated refresh token from internal metadata service.
:param token_info: Dictionary containing token information.
(Not used, but here for compatibility)
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
return self.get_new_token()
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
"""Authentication connection for "Installed Application" authentication."""
def get_code(self):
"""
Give the user a URL that they can visit to authenticate and obtain a
code. This method will ask for that code that the user can paste in.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Code supplied by the user after authenticating
:rtype: ``str``
"""
auth_params = {'response_type': 'code',
'client_id': self.user_id,
'redirect_uri': self.redirect_uri,
'scope': self.scopes,
'state': 'Libcloud Request'}
if self.login_hint:
auth_params['login_hint'] = self.login_hint
data = urlencode(auth_params)
url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
print('\nPlease Go to the following URL and sign in:')
print(url)
if PY3:
code = input('Enter Code: ')
else:
code = raw_input('Enter Code: ')
return code
def get_new_token(self):
"""
Get a new token. Generally used when no previous token exists or there
is no refresh token
:return: Dictionary containing token information
:rtype: ``dict``
"""
# Ask the user for a code
code = self.get_code()
token_request = {'code': code,
'client_id': self.user_id,
'client_secret': self.key,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'}
return self._token_request(token_request)
def refresh_token(self, token_info):
"""
Use the refresh token supplied in the token info to get a new token.
:param token_info: Dictionary containing current token information
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
if 'refresh_token' not in token_info:
return self.get_new_token()
refresh_request = {'refresh_token': token_info['refresh_token'],
'client_id': self.user_id,
'client_secret': self.key,
'grant_type': 'refresh_token'}
new_token = self._token_request(refresh_request)
if 'refresh_token' not in new_token:
new_token['refresh_token'] = token_info['refresh_token']
return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for "Service Account" authentication."""
def __init__(self, user_id, key, *args, **kwargs):
"""
Check to see if PyCrypto is available, and convert key file path into a
key string if the key is in a file.
:param user_id: Email address to be used for Service Account
authentication.
:type user_id: ``str``
:param key: The RSA Key or path to file containing the key.
:type key: ``str``
"""
if SHA256 is None:
raise GoogleAuthError('PyCrypto library required for '
'Service Account Authentication.')
# Check to see if 'key' is a file and read the file if it is.
if key.find("PRIVATE KEY---") == -1:
# key is a file
keypath = os.path.expanduser(key)
is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
if not is_file_path:
raise ValueError("Missing (or not readable) key "
"file: '%s'" % key)
with open(keypath, 'r') as f:
contents = f.read()
try:
key = json.loads(contents)
key = key['private_key']
except ValueError:
key = contents
logger = logging.getLogger(__name__)
logger.warn('%s not in JSON format. This format is '
'deprecated. Please download a JSON key '
'from the Cloud Console.' % keypath)
super(GoogleServiceAcctAuthConnection, self).__init__(
user_id, key, *args, **kwargs)
def get_new_token(self):
"""
Get a new token using the email address and RSA Key.
:return: Dictionary containing token information
:rtype: ``dict``
"""
# The header is always the same
header = {'alg': 'RS256', 'typ': 'JWT'}
header_enc = base64.urlsafe_b64encode(b(json.dumps(header)))
# Construct a claim set
claim_set = {'iss': self.user_id,
'scope': self.scopes,
'aud': 'https://accounts.google.com/o/oauth2/token',
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
claim_set_enc = base64.urlsafe_b64encode(b(json.dumps(claim_set)))
# The message contains both the header and claim set
message = b'.'.join((header_enc, claim_set_enc))
# Then the message is signed using the key supplied
key = RSA.importKey(self.key)
hash_func = SHA256.new(message)
signer = PKCS1_v1_5.new(key)
signature = base64.urlsafe_b64encode(signer.sign(hash_func))
# Finally the message and signature are sent to get a token
jwt = b'.'.join((message, signature))
request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': jwt}
return self._token_request(request)
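    # Hedged sketch (added for illustration): the JWT assembled above has the
    # standard three-part structure header.claims.signature, each part
    # base64url-encoded. Assuming PyCrypto is available and pem_key is a
    # hypothetical PEM string, the signing step in isolation is:
    #
    #     key = RSA.importKey(pem_key)
    #     signer = PKCS1_v1_5.new(key)
    #     signature = base64.urlsafe_b64encode(signer.sign(SHA256.new(message)))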
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for self-authentication when used with a GCE
    instance that supports serviceAccounts.
"""
def get_new_token(self):
"""
Get a new token from the internal metadata service.
:return: Dictionary containing token information
:rtype: ``dict``
"""
path = '/instance/service-accounts/default/token'
http_code, http_reason, token_info = _get_gce_metadata(path)
if http_code == httplib.NOT_FOUND:
raise ValueError("Service Accounts are not enabled for this "
"GCE instance.")
if http_code != httplib.OK:
raise ValueError("Internal GCE Authorization failed: "
"'%s'" % str(http_reason))
token_info = json.loads(token_info)
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
class GoogleAuthType(object):
"""
SA (Service Account),
IA (Installed Application),
GCE (Auth from a GCE instance with service account enabled)
GCS_S3 (Cloud Storage S3 interoperability authentication)
"""
SA = 'SA'
IA = 'IA'
GCE = 'GCE'
GCS_S3 = 'GCS_S3'
ALL_TYPES = [SA, IA, GCE, GCS_S3]
OAUTH2_TYPES = [SA, IA, GCE]
@classmethod
def guess_type(cls, user_id):
if cls._is_sa(user_id):
return cls.SA
elif cls._is_gce():
return cls.GCE
elif cls._is_gcs_s3(user_id):
return cls.GCS_S3
else:
return cls.IA
@classmethod
def is_oauth2(cls, auth_type):
return auth_type in cls.OAUTH2_TYPES
@staticmethod
def _is_gce():
"""
Checks if we can access the GCE metadata server.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
http_code, http_reason, body = _get_gce_metadata()
if http_code == httplib.OK and body:
return True
return False
@staticmethod
def _is_gcs_s3(user_id):
"""
Checks S3 key format: 20 alphanumeric chars starting with GOOG.
"""
return len(user_id) == 20 and user_id.startswith('GOOG')
@staticmethod
def _is_sa(user_id):
return user_id.endswith('@developer.gserviceaccount.com')
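    # Illustrative only (added): how guess_type() classifies a user_id,
    # assuming the code is not running on a GCE instance:
    #
    #     >>> GoogleAuthType.guess_type('x@developer.gserviceaccount.com')
    #     'SA'
    #     >>> GoogleAuthType.guess_type('GOOGABCDEFGHIJKLMNOP')  # 20 chars, GOOG prefix
    #     'GCS_S3'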
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
"""Base connection class for interacting with Google APIs."""
driver = GoogleBaseDriver
responseCls = GoogleResponse
host = 'www.googleapis.com'
poll_interval = 2.0
timeout = 180
credential_file = '~/.google_libcloud_auth'
def __init__(self, user_id, key=None, auth_type=None,
credential_file=None, scopes=None, **kwargs):
"""
Determine authentication type, set up appropriate authentication
connection and get initial authentication information.
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword auth_type: See GoogleAuthType class for list and description
of accepted values.
If not supplied, auth_type will be guessed based
on value of user_id or if the code is running
on a GCE instance.
:type auth_type: ``str``
:keyword credential_file: Path to file for caching authentication
information.
:type credential_file: ``str``
:keyword scopes: List of OAuth2 scope URLs. The empty default sets
read/write access to Compute, Storage, and DNS.
:type scopes: ``list``
"""
self.user_id = user_id
self.key = key
if auth_type and auth_type not in GoogleAuthType.ALL_TYPES:
raise GoogleAuthError('Invalid auth type: %s' % auth_type)
self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
# OAuth2 stuff and placeholders
self.scopes = scopes
self.oauth2_conn = None
self.oauth2_token = None
if credential_file:
self.credential_file = credential_file
elif self.auth_type == GoogleAuthType.SA:
self.credential_file += '.' + user_id
if GoogleAuthType.is_oauth2(self.auth_type):
self._init_oauth2(**kwargs)
super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
sys.version_info[2])
ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
self.user_agent_append(ver_platform)
@property
def token_expire_utc_datetime(self):
return _from_utc_timestamp(self.oauth2_token['expire_time'])
def add_default_headers(self, headers):
"""
@inherits: :class:`Connection.add_default_headers`
"""
headers['Content-Type'] = 'application/json'
headers['Host'] = self.host
return headers
def pre_connect_hook(self, params, headers):
"""
Check to make sure that token hasn't expired. If it has, get an
updated token. Also, add the token to the headers.
@inherits: :class:`Connection.pre_connect_hook`
"""
if self.token_expire_utc_datetime < _utcnow():
self._refresh_oauth2_token()
headers['Authorization'] = 'Bearer %s' % (
self.oauth2_token['access_token'])
return params, headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
def request(self, *args, **kwargs):
"""
@inherits: :class:`Connection.request`
"""
# Adds some retry logic for the occasional
# "Connection Reset by peer" error.
retries = 4
tries = 0
while tries < (retries - 1):
try:
return super(GoogleBaseConnection, self).request(
*args, **kwargs)
except socket.error:
e = sys.exc_info()[1]
if e.errno == errno.ECONNRESET:
tries = tries + 1
else:
raise e
# One more time, then give up.
return super(GoogleBaseConnection, self).request(*args, **kwargs)
def has_completed(self, response):
"""
Determine if operation has completed based on response.
:param response: JSON response
:type response: I{responseCls}
:return: True if complete, False otherwise
:rtype: ``bool``
"""
if response.object['status'] == 'DONE':
return True
else:
return False
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
@inherits: :class:`PollingConnection.get_poll_request_kwargs`
"""
return {'action': response.object['selfLink']}
def morph_action_hook(self, action):
"""
Update action to correct request path.
In many places, the Google API returns a full URL to a resource.
This will strip the scheme and host off of the path and just return
the request. Otherwise, it will prepend the base request_path to
the action.
:param action: The action to be called in the http request
:type action: ``str``
:return: The modified request based on the action
:rtype: ``str``
"""
if action.startswith('https://'):
u = urlparse.urlsplit(action)
request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
else:
request = self.request_path + action
return request
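    # Hedged example (added): morph_action_hook() turns a full resource URL
    # into a host-relative request path, e.g. the path component kept here is
    #
    #     >>> urlparse.urlsplit('https://www.googleapis.com/compute/v1/projects/p')[2]
    #     '/compute/v1/projects/p'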
def _refresh_oauth2_token(self):
self.oauth2_token = self.oauth2_conn.refresh_token(self.oauth2_token)
self._write_token_to_file()
def _init_oauth2(self, **kwargs):
# Default scopes to read/write for compute, storage, and dns. Can
# override this when calling get_driver() or setting in secrets.py
if not self.scopes:
self.scopes = [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
]
self.oauth2_token = self._get_token_from_file()
if self.auth_type == GoogleAuthType.GCE:
self.oauth2_conn = GoogleGCEServiceAcctAuthConnection(
self.user_id, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.SA:
self.oauth2_conn = GoogleServiceAcctAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.IA:
self.oauth2_conn = GoogleInstalledAppAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
else:
raise GoogleAuthError('Invalid auth_type: %s' %
str(self.auth_type))
if self.oauth2_token is None:
self.oauth2_token = self.oauth2_conn.get_new_token()
self._write_token_to_file()
def _get_token_from_file(self):
"""
Read credential file and return token information.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Token information dictionary, or None
:rtype: ``dict`` or ``None``
"""
token = None
filename = os.path.realpath(os.path.expanduser(self.credential_file))
try:
with open(filename, 'r') as f:
data = f.read()
token = json.loads(data)
except IOError:
pass
return token
def _write_token_to_file(self):
"""
Write token to credential file.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
filename = os.path.realpath(os.path.expanduser(self.credential_file))
data = json.dumps(self.oauth2_token)
with open(filename, 'w') as f:
f.write(data)
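# Usage sketch (added; the account e-mail and key path are hypothetical):
#
#     conn = GoogleBaseConnection('x@developer.gserviceaccount.com',
#                                 key='~/keys/priv.pem',
#                                 auth_type=GoogleAuthType.SA)
#     # pre_connect_hook() then attaches 'Authorization: Bearer <token>' to
#     # every request and refreshes the token when it expires.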
|
jimbobhickville/libcloud
|
libcloud/common/google.py
|
Python
|
apache-2.0
| 29,589
|
[
"VisIt"
] |
f2e912863d9f7581b341fc05d5f91de4a05057790deb4e20945c68eb5618211b
|
from .. import nengo as nengo
## This example demonstrates how to create a neuronal ensemble containing a single neuron.
##
## Network diagram:
##
## [Input] ---> (Neuron)
##
##
## Network behaviour:
## A = Input
##
# Create the nengo model
model = nengo.Model('Single Neuron') # Create the network
# Create the model inputs
model.make_node('Input', [-0.45]) # A controllable input with a
# starting value of -0.45
# Create the neuronal ensemble
model.make_ensemble('Neuron', 1, 1, # Make 1 neuron representing
max_rate = (100, 100), # 1 dimension, with a maximum
intercept = (-0.5, -0.5), # firing rate of 100, a
encoders = [[1]]) # tuning curve x-intercept of
# -0.5, encoder of 1 (i.e. it
# responds more to positive
# values)
model.noise = 3 # Set the neural noise to have a
# variance of 3
# Create the connections within the model
model.connect('Input', 'Neuron') # Connect the input to the neuron
# Build the model
model.build() # Generate model parameters
# Run the model
model.run(1) # Run for 1 second
|
jaberg/nengo
|
examples/singleneuron.py
|
Python
|
mit
| 1,505
|
[
"NEURON"
] |
67641a9a2e82d9cddf0c4740bf7c45c8ac303202d0e61b887d60ac0146f491ec
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
"""Load a TRIPOS mol2 file from disk.
Parameters
----------
filename : str
        Path to the mol2 file on disk.
Returns
-------
    traj : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
Notes
-----
This function should work on GAFF and sybyl style MOL2 files, but has
been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
The elements are guessed using GAFF atom types or via the atype string.
Examples
--------
>>> traj = md.load_mol2('mysystem.mol2')
"""
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.topology import Topology
atoms, bonds = mol2_to_dataframes(filename)
atoms_mdtraj = atoms[["name", "resName"]].copy()
atoms_mdtraj["serial"] = atoms.index
    # Figure out 1-letter element names.
    # If this is a GAFF mol2, this line should work without issues.
atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
# If this is a sybyl mol2, there should be NAN (null) values
if atoms_mdtraj.element.isnull().any():
# If this is a sybyl mol2, I think this works generally.
atoms_mdtraj["element"] = atoms.atype.apply(lambda x: x.strip(".")[0])
atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
if bonds is not None:
bonds_mdtraj = bonds[["id0", "id1"]].values
offset = bonds_mdtraj.min() # Should this just be 1???
bonds_mdtraj -= offset
else:
bonds_mdtraj = None
top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
xyzlist = np.array([atoms[["x", "y", "z"]].values])
xyzlist /= 10.0 # Convert from angstrom to nanometer
traj = Trajectory(xyzlist, top)
return traj
def mol2_to_dataframes(filename):
"""Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.
Parameters
----------
filename : str
Name of mol2 filename
Returns
-------
atoms_frame : pd.DataFrame
DataFrame containing atom information
bonds_frame : pd.DataFrame
DataFrame containing bond information
Notes
-----
These dataframes may contain force field information as well as the
information necessary for constructing the coordinates and molecular
topology. This function has been tested for GAFF and sybyl-style
mol2 files but has been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
See Also
--------
If you just need the coordinates and bonds, use load_mol2(filename)
to get a Trajectory object.
"""
pd = import_('pandas')
with open(filename) as f:
data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
# Mol2 can have "status bits" at the end of the bond lines. We don't care
# about these, but they interfere with using pd_read_table because it looks
# like one line has too many columns. So we just regex out the offending
# text.
    status_bit_regex = r"BACKBONE|DICT|INTERRES|\|"
data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
for s in data["@<TRIPOS>BOND\n"]]
if len(data["@<TRIPOS>BOND\n"]) > 1:
csv = StringIO()
csv.writelines(data["@<TRIPOS>BOND\n"][1:])
csv.seek(0)
bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
index_col=0, header=None, sep="\s*", engine='python')
else:
bonds_frame = None
csv = StringIO()
csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
csv.seek(0)
atoms_frame = pd.read_csv(csv, sep="\s*", engine='python', header=None)
ncols = atoms_frame.shape[1]
names=["serial", "name", "x", "y", "z", "atype", "code", "resName", "charge", "status"]
atoms_frame.columns = names[:ncols]
return atoms_frame, bonds_frame
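# Usage sketch (added; 'mysystem.mol2' is a hypothetical file):
#
#     atoms, bonds = mol2_to_dataframes('mysystem.mol2')
#     print(atoms[['name', 'x', 'y', 'z']].head())
#     traj = load_mol2('mysystem.mol2')  # same file, parsed into an md.Trajectory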
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
gaff_elements = {
'br': 'Br',
'c': 'C',
'c1': 'C',
'c2': 'C',
'c3': 'C',
'ca': 'C',
'cc': 'C',
'cd': 'C',
'ce': 'C',
'cf': 'C',
'cg': 'C',
'ch': 'C',
'cl': 'Cl',
'cp': 'C',
'cq': 'C',
'cu': 'C',
'cv': 'C',
'cx': 'C',
'cy': 'C',
'cz': 'C',
'f': 'F',
'h1': 'H',
'h2': 'H',
'h3': 'H',
'h4': 'H',
'h5': 'H',
'ha': 'H',
'hc': 'H',
'hn': 'H',
'ho': 'H',
'hp': 'H',
'hs': 'H',
'hw': 'H',
'hx': 'H',
'i': 'I',
'n': 'N',
'n1': 'N',
'n2': 'N',
'n3': 'N',
'n4': 'N',
'na': 'N',
'nb': 'N',
'nc': 'N',
'nd': 'N',
'ne': 'N',
'nf': 'N',
'nh': 'N',
'no': 'N',
'o': 'O',
'oh': 'O',
'os': 'O',
'ow': 'O',
'p2': 'P',
'p3': 'P',
'p4': 'P',
'p5': 'P',
'pb': 'P',
'px': 'P',
'py': 'P',
's': 'S',
's2': 'S',
's4': 'S',
's6': 'S',
'sh': 'S',
'ss': 'S',
'sx': 'S',
'sy': 'S'}
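# Illustration (added): the element lookup in load_mol2() first maps GAFF atom
# types through gaff_elements, then falls back to the first letter of the
# sybyl type string:
#
#     >>> gaff_elements['ca']
#     'C'
#     >>> 'N.ar'.strip('.')[0]  # sybyl fallback used in load_mol2
#     'N'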
|
tcmoore3/mdtraj
|
mdtraj/formats/mol2.py
|
Python
|
lgpl-2.1
| 8,106
|
[
"MDTraj",
"OpenMM"
] |
dcac72a720b46d72ec7262991e1ae4c3114d13184ac87734177d00dbc6d9a302
|
#!/usr/bin/env python
##############################################################################
#
# Usage example for the procedure PPXF, which
# implements the Penalized Pixel-Fitting (pPXF) method by
# Cappellari M., & Emsellem E., 2004, PASP, 116, 138.
# The example also shows how to include a library of templates
# and how to mask gas emission lines if present.
# The example is specialized for a fit to a SDSS spectrum.
#
# MODIFICATION HISTORY:
# V1.0.0: Written by Michele Cappellari, Leiden 11 November 2003
# V1.1.0: Log rebin the galaxy spectrum. Show how to correct the velocity
# for the difference in starting wavelength of galaxy and templates.
# MC, Vicenza, 28 December 2004
# V1.1.1: Included explanation of correction for instrumental resolution.
# After feedback from David Valls-Gabaud. MC, Venezia, 27 June 2005
# V2.0.0: Included example routine to determine the goodPixels vector
# by masking known gas emission lines. MC, Oxford, 30 October 2008
# V2.0.1: Included instructions for high-redshift usage. Thanks to Paul Westoby
# for useful feedback on this issue. MC, Oxford, 27 November 2008
# V2.0.2: Included example for obtaining the best-fitting redshift.
# MC, Oxford, 14 April 2009
# V2.1.0: Bug fix: Force PSF_GAUSSIAN to produce a Gaussian with an odd
# number of elements centered on the middle one. Many thanks to
# Harald Kuntschner, Eric Emsellem, Anne-Marie Weijmans and
# Richard McDermid for reporting problems with small offsets
# in systemic velocity. MC, Oxford, 15 February 2010
# V2.1.1: Added normalization of galaxy spectrum to avoid numerical
# instabilities. After feedback from Andrea Cardullo.
# MC, Oxford, 17 March 2010
# V2.2.0: Perform templates convolution in linear wavelength.
# This is useful for spectra with large wavelength range.
# MC, Oxford, 25 March 2010
# V2.2.1: Updated for Coyote Graphics. MC, Oxford, 11 October 2011
# V2.3.0: Specialized for SDSS spectrum following requests from users.
# Renamed PPXF_KINEMATICS_EXAMPLE_SDSS. MC, Oxford, 12 January 2012
# V3.0.0: Translated from IDL into Python. MC, Oxford, 10 December 2013
# V3.0.1: Uses MILES models library. MC, Oxford 11 December 2013
# V3.0.2: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# V3.0.3: Explicitly sort template files as glob() output may not be sorted.
# Thanks to Marina Trevisan for reporting problems under Linux.
# MC, Sydney, 4 February 2015
# V3.0.4: Use redshift in determine_goodpixels. MC, Oxford, 5 May 2015
# V3.1.0: Illustrate how to deal with variable instrumental resolution.
# Use example galaxy spectrum from SDSS DR12. MC, Oxford, 12 October 2015
# V3.1.1: Support both Pyfits and Astropy to read FITS files.
# MC, Oxford, 22 October 2015
#
##############################################################################
from __future__ import print_function
from astropy.io import fits
import numpy as np
import glob
from time import clock
from ppxf import ppxf
import ppxf_util as util
def ppxf_kinematics_example_sdss():
# Read SDSS DR12 galaxy spectrum taken from here http://dr12.sdss3.org/
# The spectrum is *already* log rebinned by the SDSS DR12
# pipeline and log_rebin should not be used in this case.
file = 'spectra/NGC4636_SDSS_DR12.fits'
hdu = fits.open(file)
t = hdu['COADD'].data
z = 0.003129 # SDSS redshift estimate
# Only use the wavelength range in common between galaxy and stellar library.
mask = (t['loglam'] > np.log10(3540)) & (t['loglam'] < np.log10(7409))
flux = t['flux'][mask]
galaxy = flux/np.median(flux) # Normalize spectrum to avoid numerical issues
loglam_gal = t['loglam'][mask]
lam_gal = 10**loglam_gal
noise = galaxy*0 + 0.0166 # Assume constant noise per pixel here
c = 299792.458 # speed of light in km/s
frac = lam_gal[1]/lam_gal[0] # Constant lambda fraction per pixel
dlam_gal = (frac - 1)*lam_gal # Size of every pixel in Angstrom
wdisp = t['wdisp'][mask] # Intrinsic dispersion of every pixel, in pixels units
fwhm_gal = 2.355*wdisp*dlam_gal # Resolution FWHM of every pixel, in Angstroms
velscale = np.log(frac)*c # Constant velocity scale in km/s per pixel
# If the galaxy is at a significant redshift (z > 0.03), one would need to apply
# a large velocity shift in PPXF to match the template to the galaxy spectrum.
# This would require a large initial value for the velocity (V > 1e4 km/s)
# in the input parameter START = [V,sig]. This can cause PPXF to stop!
# The solution consists of bringing the galaxy spectrum roughly to the
# rest-frame wavelength, before calling PPXF. In practice there is no
    # need to modify the spectrum in any way, given that a redshift
# corresponds to a linear shift of the log-rebinned spectrum.
# One just needs to compute the wavelength range in the rest-frame
# and adjust the instrumental resolution of the galaxy observations.
# This is done with the following three commented lines:
#
# lam_gal = lam_gal/(1+z) # Compute approximate restframe wavelength
# fwhm_gal = fwhm_gal/(1+z) # Adjust resolution in Angstrom
# Read the list of filenames from the Single Stellar Population library
# by Vazdekis (2010, MNRAS, 404, 1639) http://miles.iac.es/. A subset
# of the library is included for this example with permission
vazdekis = glob.glob('miles_models/Mun1.30Z*.fits')
fwhm_tem = 2.51 # Vazdekis+10 spectra have a constant resolution FWHM of 2.51A.
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the SDSS galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
#
hdu = fits.open(vazdekis[0])
ssp = hdu[0].data
h2 = hdu[0].header
lam_temp = h2['CRVAL1'] + h2['CDELT1']*np.arange(h2['NAXIS1'])
lamRange_temp = [np.min(lam_temp), np.max(lam_temp)]
sspNew, logLam2, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale)
templates = np.empty((sspNew.size, len(vazdekis)))
# Interpolates the galaxy spectral resolution at the location of every pixel
# of the templates. Outside the range of the galaxy spectrum the resolution
# will be extrapolated, but this is irrelevant as those pixels cannot be
# used in the fit anyway.
fwhm_gal = np.interp(lam_temp, lam_gal, fwhm_gal)
# Convolve the whole Vazdekis library of spectral templates
# with the quadratic difference between the SDSS and the
# Vazdekis instrumental resolution. Logarithmically rebin
# and store each template as a column in the array TEMPLATES.
# Quadratic sigma difference in pixels Vazdekis --> SDSS
# The formula below is rigorously valid if the shapes of the
# instrumental spectral profiles are well approximated by Gaussians.
#
# In the line below, the fwhm_dif is set to zero when fwhm_gal < fwhm_tem.
# In principle it should never happen and a higher resolution template should be used.
#
fwhm_dif = np.sqrt((fwhm_gal**2 - fwhm_tem**2).clip(0))
sigma = fwhm_dif/2.355/h2['CDELT1'] # Sigma difference in pixels
for j, fname in enumerate(vazdekis):
hdu = fits.open(fname)
ssp = hdu[0].data
ssp = util.gaussian_filter1d(ssp, sigma) # perform convolution with variable sigma
sspNew, logLam2, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale)
templates[:, j] = sspNew/np.median(sspNew) # Normalizes templates
# The galaxy and the template spectra do not have the same starting wavelength.
# For this reason an extra velocity shift DV has to be applied to the template
# to fit the galaxy spectrum. We remove this artificial shift by using the
# keyword VSYST in the call to PPXF below, so that all velocities are
    # measured with respect to DV. This assumes the redshift is negligible.
# In the case of a high-redshift galaxy one should de-redshift its
# wavelength to the rest frame before using the line below (see above).
#
c = 299792.458
dv = np.log(lam_temp[0]/lam_gal[0])*c # km/s
goodpixels = util.determine_goodpixels(np.log(lam_gal), lamRange_temp, z)
# Here the actual fit starts. The best fit is plotted on the screen.
# Gas emission lines are excluded from the pPXF fit using the GOODPIXELS keyword.
#
vel = c*np.log(1 + z) # Initial estimate of the galaxy velocity in km/s
    start = [vel, 200., 0, 0]  # (km/s), starting guess for [V, sigma, h3, h4]
t = clock()
pp = ppxf(templates, galaxy, noise, velscale, start,
goodpixels=goodpixels, plot=True, moments=4,
degree=12, vsyst=dv, clean=False)
print("Formal errors:")
print(" dV dsigma dh3 dh4")
print("".join("%8.2g" % f for f in pp.error*np.sqrt(pp.chi2)))
print('Elapsed time in PPXF: %.2f s' % (clock() - t))
# If the galaxy is at significant redshift z and the wavelength has been
# de-redshifted with the three lines "z = 1.23..." near the beginning of
# this procedure, the best-fitting redshift is now given by the following
# commented line (equation 2 of Cappellari et al. 2009, ApJ, 704, L34):
#
#print, 'Best-fitting redshift z:', (z + 1)*(1 + sol[0]/c) - 1
#------------------------------------------------------------------------------
if __name__ == '__main__':
ppxf_kinematics_example_sdss()
import matplotlib.pyplot as plt
plt.show()
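# Numerical check (added, not in the original script): with the NGC4636
# redshift used above, the initial velocity guess evaluates to
#
#     >>> 299792.458*np.log(1 + 0.003129)  # km/s
#     # -> ~936.6 km/s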
|
cebarbosa/fossilgroups
|
ppxf/ppxf_kinematics_example_sdss.py
|
Python
|
gpl-3.0
| 9,679
|
[
"Galaxy",
"Gaussian"
] |
98d10a77b72f6368e9fa1855a02083435133a680a5b096a92178af29b517a0cc
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import vtk
class VTKtoITKF3(NoConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
# setup the pipeline
self._imageCast = vtk.vtkImageCast()
self._imageCast.SetOutputScalarTypeToFloat()
self._vtk2itk = itk.VTKImageToImageFilter[itk.Image[itk.F, 3]].New()
self._vtk2itk.SetInput(self._imageCast.GetOutput())
NoConfigModuleMixin.__init__(
self,
{'Module (self)' : self,
'vtkImageCast' : self._imageCast,
'VTKImageToImageFilter' : self._vtk2itk})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
NoConfigModuleMixin.close(self)
ModuleBase.close(self)
del self._imageCast
del self._vtk2itk
def execute_module(self):
# the whole connectvtkitk thingy is quite shaky and was really
# designed for demand-driven use. using it in an event-driven
# environment, we have to make sure it does exactly what we want
# it to do. one day, we'll implement contracts and do this
# differently.
#o = self._itkImporter.GetOutput()
#o.UpdateOutputInformation()
#o.SetRequestedRegionToLargestPossibleRegion()
#o.Update()
self._vtk2itk.Update()
def get_input_descriptions(self):
return ('VTK Image Data',)
def set_input(self, idx, inputStream):
self._imageCast.SetInput(inputStream)
def get_output_descriptions(self):
return ('ITK Image (3D, float)',)
def get_output(self, idx):
return self._vtk2itk.GetOutput()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
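# Hedged sketch (added): outside the DeVIDE framework, the same VTK-to-ITK
# conversion pipeline built in __init__ above is, in essence:
#
#     cast = vtk.vtkImageCast()
#     cast.SetOutputScalarTypeToFloat()
#     vtk2itk = itk.VTKImageToImageFilter[itk.Image[itk.F, 3]].New()
#     vtk2itk.SetInput(cast.GetOutput())
#     vtk2itk.Update()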
|
nagyistoce/devide
|
modules/insight/VTKtoITKF3.py
|
Python
|
bsd-3-clause
| 2,339
|
[
"VTK"
] |
8375b5fc1fb2200d93da5c6dc62d369d3c5380fa483e011eab74b151310d8493
|
import logging
from abc import ABCMeta
from abc import abstractmethod
from io import BytesIO
from string import Template
from urllib.parse import urlencode, urljoin
from galaxy.util import unicodify
from .util import copy_to_path
log = logging.getLogger(__name__)
class PulsarInterface(metaclass=ABCMeta):
"""
Abstract base class describes how synchronous client communicates with
(potentially remote) Pulsar procedures. Obvious implementation is HTTP based
but Pulsar objects wrapped in routes can also be directly communicated with
if in memory.
"""
@abstractmethod
def execute(self, command, args=None, data=None, input_path=None, output_path=None):
"""
Execute the correspond command against configured Pulsar job manager. Arguments are
method parameters and data or input_path describe essentially POST bodies. If command
results in a file, resulting path should be specified as output_path.
"""
COMMAND_TO_PATH = {
"path": Template("jobs/${job_id}/files/path"),
"upload_file": Template("jobs/${job_id}/files"),
"download_output": Template("jobs/${job_id}/files"),
"setup": Template("jobs"),
"clean": Template("jobs/${job_id}"),
"status": Template("jobs/${job_id}/status"),
"cancel": Template("jobs/${job_id}/cancel"),
"submit": Template("jobs/${job_id}/submit"),
"file_available": Template("cache/status"),
"cache_required": Template("cache"),
"cache_insert": Template("cache"),
"object_store_exists": Template("objects/${object_id}/exists"),
"object_store_file_ready": Template("objects/${object_id}/file_ready"),
"object_store_update_from_file": Template("objects/${object_id}"),
"object_store_create": Template("objects/${object_id}"),
"object_store_empty": Template("objects/${object_id}/empty"),
"object_store_size": Template("objects/${object_id}/size"),
"object_store_delete": Template("objects/${object_id}"),
"object_store_get_data": Template("objects/${object_id}"),
"object_store_get_filename": Template("objects/${object_id}/filename"),
"object_store_get_store_usage_percent": Template("object_store_usage_percent")
}
COMMAND_TO_METHOD = {
"upload_file": "POST",
"download_output": "GET",
"setup": "POST",
"submit": "POST",
"clean": "DELETE",
"cancel": "PUT",
"object_store_update_from_file": "PUT",
"object_store_create": "POST",
"object_store_delete": "DELETE",
"file_available": "GET",
"cache_required": "PUT",
"cache_insert": "POST",
}
class HttpPulsarInterface(PulsarInterface):
def __init__(self, destination_params, transport):
self.transport = transport
remote_host = destination_params.get("url")
assert remote_host is not None, "Failed to determine url for Pulsar client."
if not remote_host.startswith("http"):
remote_host = "http://%s" % remote_host
manager = destination_params.get("manager", None)
if manager:
if "/managers/" in remote_host:
log.warning("Ignoring manager tag '%s', Pulsar client URL already contains a \"/managers/\" path." % manager)
else:
remote_host = urljoin(remote_host, "managers/%s" % manager)
if not remote_host.endswith("/"):
remote_host = "%s/" % remote_host
self.remote_host = remote_host
self.private_token = destination_params.get("private_token", None)
def execute(self, command, args=None, data=None, input_path=None, output_path=None):
url = self.__build_url(command, args)
        method = COMMAND_TO_METHOD.get(command, None)  # Default to GET if no data, POST otherwise
response = self.transport.execute(url, method=method, data=data, input_path=input_path, output_path=output_path)
return response
def __build_url(self, command, args):
if args is None:
args = {}
path = COMMAND_TO_PATH.get(command, Template(command)).safe_substitute(args)
if self.private_token:
args["private_token"] = self.private_token
arg_bytes = {k: unicodify(args[k]).encode('utf-8') for k in args}
data = urlencode(arg_bytes)
url = self.remote_host + path + "?" + data
return url
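    # Illustrative example (added; host and job id are hypothetical): with
    # remote_host = 'http://pulsar.example.org:8913/', the "status" command
    # for job 42 resolves to
    #
    #     http://pulsar.example.org:8913/jobs/42/status?job_id=42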
class LocalPulsarInterface(PulsarInterface):
def __init__(self, destination_params, job_manager=None, pulsar_app=None, file_cache=None, object_store=None):
if job_manager is None:
job_manager_name = destination_params.get("manager", None)
if job_manager_name is None:
job_manager = pulsar_app.only_manager
else:
job_manager = pulsar_app.managers[job_manager_name]
self.job_manager = job_manager
self.file_cache = file_cache
self.object_store = object_store
def __app_args(self):
# Arguments that would be specified from PulsarApp if running
# in web server.
return {
'manager': self.job_manager,
'file_cache': self.file_cache,
'object_store': self.object_store,
'ip': None
}
def execute(self, command, args=None, data=None, input_path=None, output_path=None):
if args is None:
args = {}
# If data set, should be unicode (on Python 2) or str (on Python 3).
from pulsar.web import routes
from pulsar.web.framework import build_func_args
controller = getattr(routes, command)
action = controller.func
body_args = dict(body=self.__build_body(data, input_path))
args = build_func_args(action, args.copy(), self.__app_args(), body_args)
result = action(**args)
if controller.response_type != 'file':
return controller.body(result)
else:
with open(result, 'rb') as result_file:
copy_to_path(result_file, output_path)
def __build_body(self, data, input_path):
if data is not None:
return BytesIO(data)
elif input_path is not None:
return open(input_path, 'rb')
else:
return None
|
galaxyproject/pulsar
|
pulsar/client/server_interface.py
|
Python
|
apache-2.0
| 6,188
|
[
"Galaxy"
] |
668919d78545db03453c9e04c8ec6218397e121766f4e77232a6cf4e0af11f63
|
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida.orm.data.array import ArrayData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.parameter import ParameterData
import numpy as np
def read_log_file(logfile):
    with open(logfile, 'r') as f:
        data = f.readlines()
data_dict = {}
for i, line in enumerate(data):
if 'Loop time' in line:
energy = float(data[i-1].split()[4])
data_dict['energy'] = energy
xx, yy, zz, xy, xz, yz = data[i-1].split()[5:11]
stress = np.array([[xx, xy, xz],
[xy, yy, yz],
[xz, yz, zz]], dtype=float)
if '$(xlo)' in line:
a = data[i+1].split()
if '$(ylo)' in line:
b = data[i+1].split()
if '$(zlo)' in line:
c = data[i+1].split()
bounds = np.array([a, b, c], dtype=float)
# lammps_input_file += 'print "$(xlo) $(xhi) $(xy)"\n'
# lammps_input_file += 'print "$(ylo) $(yhi) $(xz)"\n'
# lammps_input_file += 'print "$(zlo) $(zhi) $(yz)"\n'
xy = bounds[0, 2]
xz = bounds[1, 2]
yz = bounds[2, 2]
xlo = bounds[0, 0]
xhi = bounds[0, 1]
ylo = bounds[1, 0]
yhi = bounds[1, 1]
zlo = bounds[2, 0]
zhi = bounds[2, 1]
super_cell = np.array([[xhi-xlo, xy, xz],
[0, yhi-ylo, yz],
[0, 0, zhi-zlo]])
cell=super_cell.T
if np.linalg.det(cell) < 0:
cell = -1.0*cell
volume = np.linalg.det(cell)
stress = -stress/volume * 1.e-3 # bar*A^3 -> kbar
return data_dict, cell, stress
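# Unit-conversion note (added): per the parser's own comment above, the raw
# tensor terms are in bar*A^3; dividing by the cell volume gives bar, and the
# 1.e-3 factor converts bar to kbar. E.g. a component of -4.2e4 bar*A^3 in a
# 1000 A^3 cell:
#
#     >>> -(-4.2e4)/1000.0 * 1.e-3  # kbar
#     0.042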
def read_lammps_positions_and_forces(file_name):
import mmap
# Time in picoseconds
# Coordinates in Angstroms
# Starting reading
# Dimensionality of LAMMP calculation
number_of_dimensions = 3
with open(file_name, "r+") as f:
file_map = mmap.mmap(f.fileno(), 0)
# Read time steps
while True:
position_number=file_map.find('TIMESTEP')
try:
file_map.seek(position_number)
file_map.readline()
except ValueError:
break
#Read number of atoms
position_number=file_map.find('NUMBER OF ATOMS')
file_map.seek(position_number)
file_map.readline()
number_of_atoms = int(file_map.readline())
#Read cell
position_number=file_map.find('ITEM: BOX')
file_map.seek(position_number)
file_map.readline()
bounds = []
for i in range(3):
bounds.append(file_map.readline().split())
bounds = np.array(bounds, dtype=float)
if bounds.shape[1] == 2:
bounds = np.append(bounds, np.array([0, 0, 0])[None].T ,axis=1)
xy = bounds[0, 2]
xz = bounds[1, 2]
yz = bounds[2, 2]
xlo = bounds[0, 0] - np.min([0.0, xy, xz, xy+xz])
xhi = bounds[0, 1] - np.max([0.0, xy, xz, xy+xz])
ylo = bounds[1, 0] - np.min([0.0, yz])
yhi = bounds[1, 1] - np.max([0.0, yz])
zlo = bounds[2, 0]
zhi = bounds[2, 1]
super_cell = np.array([[xhi-xlo, xy, xz],
[0, yhi-ylo, yz],
[0, 0, zhi-zlo]])
cell=super_cell.T
position_number = file_map.find('ITEM: ATOMS')
file_map.seek(position_number)
file_map.readline()
#Reading positions
positions = []
forces = []
read_elements = []
for i in range (number_of_atoms):
line = file_map.readline().split()[0:number_of_dimensions*2+1]
positions.append(line[1:number_of_dimensions+1])
forces.append(line[1+number_of_dimensions:number_of_dimensions*2+1])
read_elements.append(line[0])
file_map.close()
positions = np.array([positions])
forces = np.array([forces], dtype=float)
return positions, forces, read_elements, cell
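# Usage sketch (added; 'dump.lammpstrj' is a hypothetical dump file):
#
#     positions, forces, elements, cell = \
#         read_lammps_positions_and_forces('dump.lammpstrj')
#     positions.shape  # (1, n_atoms, 3): the last snapshot, with a leading axis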
class OptimizeParser(Parser):
"""
Simple Parser for LAMMPS.
"""
def __init__(self, calc):
"""
Initialize the instance of LammpsParser
"""
super(OptimizeParser, self).__init__(calc)
def parse_with_retrieved(self, retrieved):
"""
Parses the datafolder, stores results.
"""
# suppose at the start that the job is successful
successful = True
# select the folder object
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return False, ()
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# OUTPUT file should exist
if not self._calc._OUTPUT_FILE_NAME in list_of_files:
successful = False
self.logger.error("Output file not found")
return successful, ()
# Get file and do the parsing
        outfile = out_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME)
        output_trajectory = out_folder.get_abs_path(self._calc._OUTPUT_TRAJECTORY_FILE_NAME)
        output_data, cell, stress_tensor = read_log_file(outfile)
        positions, forces, symbols, cell2 = read_lammps_positions_and_forces(output_trajectory)
# look at warnings
warnings = []
with open(out_folder.get_abs_path( self._calc._SCHED_ERROR_FILE )) as f:
errors = f.read()
if errors:
warnings = [errors]
# ====================== prepare the output node ======================
# save the outputs
new_nodes_list = []
# save optimized structure into node
structure = StructureData(cell=cell)
for i, position in enumerate(positions[-1]):
structure.append_atom(position=position.tolist(),
symbols=symbols[i])
new_nodes_list.append(('output_structure', structure))
# save forces into node
array_data = ArrayData()
array_data.set_array('forces', forces)
array_data.set_array('stress', stress_tensor)
new_nodes_list.append(('output_array', array_data))
# add the dictionary with warnings
output_data.update({'warnings': warnings})
parameters_data = ParameterData(dict=output_data)
new_nodes_list.append((self.get_linkname_outparams(), parameters_data))
# add the dictionary with warnings
# new_nodes_list.append((self.get_linkname_outparams(), ParameterData(dict={'warnings': warnings})))
return successful, new_nodes_list
|
abelcarreras/aiida_extensions
|
plugins/parsers/lammps/optimize.py
|
Python
|
mit
| 6,836
|
[
"LAMMPS"
] |
a2f8df56e36f3b1ebdea1dc2e76fd4b3dc6ff43ca5bcbfe58941c232aacfd326
|
# File:
# rectilinear_contour_xarray.py
#
# Synopsis:
# Demonstrate the use of xarray to read a netCDF file.
# Create a contour plot.
#
# Category:
# xarray
# netCDF
# contour
#
# Author:
# Karin Meier-Fleischer
#
# Date of initial publication:
# January, 2019
#
# Description:
# Demonstrate the use of xarray to read a netCDF file.
# Create a contour plot.
#
# Effects illustrated:
# o Read netCDF file with xarray
# o Create contour plot
#
# Output:
# A single visualization is produced.
#
'''
DKRZ Python Example: rectilinear_contour_xarray.py
- Read netCDF file
- Create contour plot
'''
from __future__ import print_function
import os
import numpy as np
import xarray as xr
import Ngl
#-----------------------------------------------------------------------
#-- Function: add_titles(wks, plot, title, left, center, right, xtitle, ytitle)
#-----------------------------------------------------------------------
def ngl_Strings(wks, plot, title='', left='', center='', right='', xtitle='', ytitle=''):
vpx = Ngl.get_float(plot,'vpXF') #-- retrieve value of res.vpXF from plot
vpy = Ngl.get_float(plot,'vpYF') #-- retrieve value of res.vpYF from plot
vpw = Ngl.get_float(plot,'vpWidthF') #-- retrieve value of res.vpWidthF from plot
vph = Ngl.get_float(plot,'vpHeightF') #-- retrieve value of res.vpHeightF from plot
ymax = vpy+0.08 #-- we need space for the title and strings
if(ymax > 0.98):
print("--> if you can't see the title use res.nglMaximize = False and/or set res.vpYF")
#-- add title
if(title != ''):
tires = Ngl.Resources()
tires.txFontHeightF = 0.016
tires.txJust = 'CenterCenter'
tires.txFont = 22 #-- Font 22: Helvetica bold
if(left != '' or center != '' or right != ''):
y = vpy + 0.075
else:
y = vpy + 0.05
Ngl.text_ndc(wks, title, 0.5, y, tires)
#-- add left, center and/or right string
txres = Ngl.Resources()
txres.txFontHeightF = 0.020 #-- font size for left, center and right string
y = vpy + 0.035 #-- y-position
if(left != ''):
txres.txJust = 'CenterLeft' #-- text justification
x = vpx #-- x-position
Ngl.text_ndc(wks, left, x, y, txres) #-- add text to wks
if(center != ''):
txres.txJust = 'CenterCenter' #-- text justification
Ngl.text_ndc(wks, center, 0.5, y, txres) #-- add text to wks
if(right != ''):
txres.txJust = 'CenterRight' #-- text justification
x = vpx+vpw #-- x-position
Ngl.text_ndc(wks, right, x, y, txres) #-- add text to wks
#-- add y-axis title string
txtires = Ngl.Resources()
txtires.txFontHeightF = 0.024 #-- font size for x-axis title string
txtires.txAngleF = 90.0
txtires.txJust = 'CenterCenter' #-- text justification
y = vpy - vph/2 #-- y-position
x = vpx - 0.12
Ngl.text_ndc(wks, ytitle, x, y, txtires) #-- add text to wks
#-----------------------------------------------------------------------
#-- Function: main
#-----------------------------------------------------------------------
def main():
#-- open file and read variable and time
    home = os.environ.get('HOME')
    fname = os.path.join(home, 'NCL/PyNGL/User_Guide_examples/rectilinear_grid_2D.nc')
    #-- open file and read variables
    f = xr.open_dataset(fname)
temp = f['tsurf'][0,::-1,:] #-- first time step, reverse latitude
lat = f['lat'][::-1] #-- reverse latitudes
lon = f['lon'][:] #-- all longitudes
    lname = temp.attrs.get('long_name', '') #-- variable long_name, if present
    units = temp.attrs.get('units', '') #-- variable units, if present
title = 'PyNGL: contour plot' #-- set title string
#-- open a workstation
wkres = Ngl.Resources() #-- generate an res object for workstation
wkres.wkColorMap = 'rainbow' #-- choose colormap
wks = Ngl.open_wks('png','plot_rectilinear_cont_xarray',wkres) #-- open workstation
#-- set resources
res = Ngl.Resources() #-- generate a resource object
res.nglMaximize = False
res.nglPointTickmarksOutward = True #-- point tickmarks outward
res.nglDraw = False
res.nglFrame = False
res.vpXF = 0.1 #-- viewport x-position
res.vpYF = 0.8 #-- viewport y-position
res.vpWidthF = 0.7 #-- viewport width
res.vpHeightF = 0.7 #-- viewport height
res.cnFillOn = True #-- turn on contour fill
res.cnLineLabelsOn = False #-- turn off line labels
res.cnInfoLabelOn = False #-- turn off info label
res.cnLevelSelectionMode = 'ManualLevels' #-- define your own contour levels
res.cnMinLevelValF = 250. #-- minimum contour value
res.cnMaxLevelValF = 315. #-- maximum contour value
res.cnLevelSpacingF = 5. #-- contour increment
res.lbRightMarginF = 0.8 #-- move labelbar to the left
res.sfXArray = lon.values #-- lon as np.ndarray
res.sfYArray = lat.values #-- lat as np.ndarray
#-- create the plot
plot = Ngl.contour_map(wks,temp,res) #-- draw contours over a map
#-- add additional strings to plot (like NCL's gsnLeftString and gsnRightString)
ngl_Strings(wks, plot, title=title, left=lname, right=units, ytitle=lname)
#-- done
Ngl.draw(plot)
Ngl.frame(wks)
Ngl.end()
#-------------------------------------------------------------
#-- run main
#-------------------------------------------------------------
if __name__ == '__main__':
main()
|
KMFleischer/PyEarthScience
|
Visualization/PyNGL/contour_plot_rectilinear_xarray.py
|
Python
|
mit
| 6,438
|
[
"NetCDF"
] |
2d5bb599ce1a3d05f4d23cdb4411360378b8c6111a7722c07d133b84816a566d
|
import numpy as np
import skimage
from PIL import Image, ImageEnhance
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from skimage import color, exposure, filters
from skimage.util import random_noise
def _elastic_transform_2d(img, sigma=6, alpha=36, random=False):
def _calc_delta(shape, alpha, sigma):
return alpha * gaussian_filter((random.rand(*shape) * 2 - 1),
sigma, mode='constant', cval=0)
assert img.ndim == 2
if random is False:
random = np.random.RandomState(None)
shape = img.shape
dx = _calc_delta(shape, alpha, sigma)
dy = _calc_delta(shape, alpha, sigma)
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x+dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
return map_coordinates(img, indices, order=1).reshape(shape)
def elastic_transform(img, sigma=6, alpha=36, random=False):
if img.ndim == 2:
ret = _elastic_transform_2d(img, sigma, alpha, random)
elif img.ndim == 3:
gray_img = color.rgb2gray(img)
ret = _elastic_transform_2d(gray_img, sigma, alpha, random)
ret = color.gray2rgb(ret)
    else:
        raise ValueError('elastic_transform expects a 2-D or 3-D image, '
                         'got ndim=%d' % img.ndim)
    return ret
def gaussian_blur(img, sigma=1, multichannel=True):
return filters.gaussian(image=img, sigma=sigma,
multichannel=multichannel)
def add_noise(img, sigma=0.155):
return random_noise(img, var=sigma**2)
def add_salt_and_pepper_noise(img, salt_vs_pepper=0.5):
return random_noise(img, mode='s&p', salt_vs_pepper=salt_vs_pepper)
def contrast(img, value=1.0):
img = ImageEnhance.Contrast(Image.fromarray(
np.uint8(img))).enhance(value)
return np.asarray(img)
def brightness(img, value=1.0):
img = ImageEnhance.Brightness(
Image.fromarray(np.uint8(img))).enhance(value)
return np.asarray(img)
def saturation(img, value=1.0):
img = ImageEnhance.Color(Image.fromarray(np.uint8(img))).enhance(value)
return np.asarray(img)
def sharpness(img, value=1.0):
img = Image.fromarray(np.uint8(img))
img = ImageEnhance.Sharpness(img).enhance(value)
return np.asarray(img)
def gamma_adjust(img, gamma=1., gain=1.):
gamma_corrected = exposure.adjust_gamma(img, gamma, gain)
return gamma_corrected
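# --- Hedged usage sketch (not part of the original module) ---
# Chain a few of the transforms above on a random RGB test image; the
# PIL-based helpers expect uint8 input, the skimage-based ones return
# floats in [0, 1].
if __name__ == '__main__':
    rgb = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    out = elastic_transform(rgb, sigma=6, alpha=36)  # float RGB in [0, 1]
    out = gaussian_blur(out, sigma=1)
    out = add_noise(out, sigma=0.1)
    dark = brightness(rgb, value=0.5)  # uint8 in, uint8 out
    print('output shape: %s, brightness dtype: %s' % (out.shape, dark.dtype))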
|
Swall0w/clib
|
clib/transforms/imageprocessing.py
|
Python
|
mit
| 2,373
|
[
"Gaussian"
] |
b889e526de9039b0685d6cbe47a8996825d0c79026428d5a7c15284c0dc05833
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this test covers a lot of the code in vtkAbstractTransform that
# is not covered elsewhere
# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.SetSize(600,300)
# set up first set of polydata
p1 = vtk.vtkPlaneSource()
p1.SetOrigin(0.5,0.508,-0.5)
p1.SetPoint1(-0.5,0.508,-0.5)
p1.SetPoint2(0.5,0.508,0.5)
p1.SetXResolution(5)
p1.SetYResolution(5)
p1.Update()
p2 = vtk.vtkPlaneSource()
p2.SetOrigin(-0.508,0.5,-0.5)
p2.SetPoint1(-0.508,-0.5,-0.5)
p2.SetPoint2(-0.508,0.5,0.5)
p2.SetXResolution(5)
p2.SetYResolution(5)
p2.Update()
p3 = vtk.vtkPlaneSource()
p3.SetOrigin(-0.5,-0.508,-0.5)
p3.SetPoint1(0.5,-0.508,-0.5)
p3.SetPoint2(-0.5,-0.508,0.5)
p3.SetXResolution(5)
p3.SetYResolution(5)
p3.Update()
p4 = vtk.vtkPlaneSource()
p4.SetOrigin(0.508,-0.5,-0.5)
p4.SetPoint1(0.508,0.5,-0.5)
p4.SetPoint2(0.508,-0.5,0.5)
p4.SetXResolution(5)
p4.SetYResolution(5)
p4.Update()
p5 = vtk.vtkPlaneSource()
p5.SetOrigin(0.5,0.5,-0.508)
p5.SetPoint1(0.5,-0.5,-0.508)
p5.SetPoint2(-0.5,0.5,-0.508)
p5.SetXResolution(5)
p5.SetYResolution(5)
p5.Update()
p6 = vtk.vtkPlaneSource()
p6.SetOrigin(0.5,0.5,0.508)
p6.SetPoint1(-0.5,0.5,0.508)
p6.SetPoint2(0.5,-0.5,0.508)
p6.SetXResolution(5)
p6.SetYResolution(5)
p6.Update()
# append together
ap = vtk.vtkAppendPolyData()
ap.AddInputData(p1.GetOutput())
ap.AddInputData(p2.GetOutput())
ap.AddInputData(p3.GetOutput())
ap.AddInputData(p4.GetOutput())
ap.AddInputData(p5.GetOutput())
ap.AddInputData(p6.GetOutput())
#--------------------------
tLinear = vtk.vtkTransform()
tPerspective = vtk.vtkPerspectiveTransform()
tGeneral = vtk.vtkGeneralTransform()
# set up a linear transformation
tLinear.Scale(1.2,1.0,0.8)
tLinear.RotateX(30)
tLinear.RotateY(10)
tLinear.RotateZ(80)
tLinear.Translate(0.2,0.3,-0.1)
tLinear.Update()
# set up a perspective transform
tPerspective.SetInput(tLinear)
tPerspective.SetInput(tLinear.GetInverse())
tPerspective.Scale(2,2,2)
# these should cancel
tPerspective.AdjustViewport(-0.5,0.5,-0.5,0.5,-1,1,-1,1)
tPerspective.AdjustViewport(-1,1,-1,1,-0.5,0.5,-0.5,0.5)
# test shear transformation
tPerspective.Shear(0.2,0.3,0.0)
tPerspective.Update()
# the following 6 operations cancel out
tPerspective.RotateWXYZ(30,1,1,1)
tPerspective.RotateWXYZ(-30,1,1,1)
tPerspective.Scale(2,2,2)
tPerspective.Scale(0.5,0.5,0.5)
tPerspective.Translate(10,0.1,0.3)
tPerspective.Translate(-10,-0.1,-0.3)
tPerspective.Concatenate(tLinear)
# test push and pop
tPerspective.Push()
tPerspective.RotateX(30)
tPerspective.RotateY(10)
tPerspective.RotateZ(80)
tPerspective.Translate(0.1,-0.2,0.0)
# test copy of transforms
tNew = tPerspective.MakeTransform()
tNew.DeepCopy(tPerspective)
tPerspective.Pop()
# test general transform
tGeneral.SetInput(tLinear)
tGeneral.SetInput(tPerspective)
tGeneral.PostMultiply()
tGeneral.Concatenate(tNew)
tGeneral.Concatenate(tNew.GetInverse())
tGeneral.PreMultiply()
# the following 6 operations cancel out
tGeneral.RotateWXYZ(30,1,1,1)
tGeneral.RotateWXYZ(-30,1,1,1)
tGeneral.Scale(2,2,2)
tGeneral.Scale(0.5,0.5,0.5)
tGeneral.Translate(10,0.1,0.3)
tGeneral.Translate(-10,-0.1,-0.3)
#--------------------------
# identity transform
f11 = vtk.vtkTransformPolyDataFilter()
f11.SetInputConnection(ap.GetOutputPort())
f11.SetTransform(tLinear)
m11 = vtk.vtkDataSetMapper()
m11.SetInputConnection(f11.GetOutputPort())
a11 = vtk.vtkActor()
a11.SetMapper(m11)
a11.GetProperty().SetColor(1,0,0)
a11.GetProperty().SetRepresentationToWireframe()
ren11 = vtk.vtkRenderer()
ren11.SetViewport(0.0,0.5,0.25,1.0)
ren11.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren11.AddActor(a11)
renWin.AddRenderer(ren11)
# inverse identity transform
f12 = vtk.vtkTransformPolyDataFilter()
f12.SetInputConnection(ap.GetOutputPort())
f12.SetTransform(tLinear.GetInverse())
m12 = vtk.vtkDataSetMapper()
m12.SetInputConnection(f12.GetOutputPort())
a12 = vtk.vtkActor()
a12.SetMapper(m12)
a12.GetProperty().SetColor(0.9,0.9,0)
a12.GetProperty().SetRepresentationToWireframe()
ren12 = vtk.vtkRenderer()
ren12.SetViewport(0.0,0.0,0.25,0.5)
ren12.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren12.AddActor(a12)
renWin.AddRenderer(ren12)
#--------------------------
# linear transform
f21 = vtk.vtkTransformPolyDataFilter()
f21.SetInputConnection(ap.GetOutputPort())
f21.SetTransform(tPerspective)
m21 = vtk.vtkDataSetMapper()
m21.SetInputConnection(f21.GetOutputPort())
a21 = vtk.vtkActor()
a21.SetMapper(m21)
a21.GetProperty().SetColor(1,0,0)
a21.GetProperty().SetRepresentationToWireframe()
ren21 = vtk.vtkRenderer()
ren21.SetViewport(0.25,0.5,0.50,1.0)
ren21.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren21.AddActor(a21)
renWin.AddRenderer(ren21)
# inverse linear transform
f22 = vtk.vtkTransformPolyDataFilter()
f22.SetInputConnection(ap.GetOutputPort())
f22.SetTransform(tPerspective.GetInverse())
m22 = vtk.vtkDataSetMapper()
m22.SetInputConnection(f22.GetOutputPort())
a22 = vtk.vtkActor()
a22.SetMapper(m22)
a22.GetProperty().SetColor(0.9,0.9,0)
a22.GetProperty().SetRepresentationToWireframe()
ren22 = vtk.vtkRenderer()
ren22.SetViewport(0.25,0.0,0.50,0.5)
ren22.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren22.AddActor(a22)
renWin.AddRenderer(ren22)
#--------------------------
# perspective transform
matrix = vtk.vtkMatrix4x4()
matrix.SetElement(3,0,0.1)
matrix.SetElement(3,1,0.2)
matrix.SetElement(3,2,0.5)
f31 = vtk.vtkTransformPolyDataFilter()
f31.SetInputConnection(ap.GetOutputPort())
f31.SetTransform(tNew)
m31 = vtk.vtkDataSetMapper()
m31.SetInputConnection(f31.GetOutputPort())
a31 = vtk.vtkActor()
a31.SetMapper(m31)
a31.GetProperty().SetColor(1,0,0)
a31.GetProperty().SetRepresentationToWireframe()
ren31 = vtk.vtkRenderer()
ren31.SetViewport(0.50,0.5,0.75,1.0)
ren31.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren31.AddActor(a31)
renWin.AddRenderer(ren31)
# inverse linear transform
f32 = vtk.vtkTransformPolyDataFilter()
f32.SetInputConnection(ap.GetOutputPort())
f32.SetTransform(tNew.GetInverse())
m32 = vtk.vtkDataSetMapper()
m32.SetInputConnection(f32.GetOutputPort())
a32 = vtk.vtkActor()
a32.SetMapper(m32)
a32.GetProperty().SetColor(0.9,0.9,0)
a32.GetProperty().SetRepresentationToWireframe()
ren32 = vtk.vtkRenderer()
ren32.SetViewport(0.5,0.0,0.75,0.5)
ren32.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren32.AddActor(a32)
renWin.AddRenderer(ren32)
#--------------------------
# perspective transform concatenation
f41 = vtk.vtkTransformPolyDataFilter()
f41.SetInputConnection(ap.GetOutputPort())
f41.SetTransform(tGeneral)
m41 = vtk.vtkDataSetMapper()
m41.SetInputConnection(f41.GetOutputPort())
a41 = vtk.vtkActor()
a41.SetMapper(m41)
a41.GetProperty().SetColor(1,0,0)
a41.GetProperty().SetRepresentationToWireframe()
ren41 = vtk.vtkRenderer()
ren41.SetViewport(0.75,0.5,1.0,1.0)
ren41.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren41.AddActor(a41)
renWin.AddRenderer(ren41)
# inverse linear transform
f42 = vtk.vtkTransformPolyDataFilter()
f42.SetInputConnection(ap.GetOutputPort())
f42.SetTransform(tGeneral.GetInverse())
m42 = vtk.vtkDataSetMapper()
m42.SetInputConnection(f42.GetOutputPort())
a42 = vtk.vtkActor()
a42.SetMapper(m42)
a42.GetProperty().SetColor(0.9,0.9,0)
a42.GetProperty().SetRepresentationToWireframe()
ren42 = vtk.vtkRenderer()
ren42.SetViewport(0.75,0.0,1.0,0.5)
ren42.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren42.AddActor(a42)
renWin.AddRenderer(ren42)
renWin.Render()
# free what we did a MakeTransform on
tNew.UnRegister(None) # not needed in python
# --- end of script --
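# --- Hedged sanity check (not part of the original test) ---
# The paired operations above are built to cancel, so any transform composed
# with its inverse should reproduce an input point up to round-off:
p = tLinear.TransformPoint(0.1, 0.2, 0.3)
q = tLinear.GetInverse().TransformPoint(p)
print("round-trip: %s" % (q,))  # expected ~(0.1, 0.2, 0.3)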
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/TransformCoverage.py
|
Python
|
gpl-3.0
| 7,494
|
[
"VTK"
] |
be0d3c2ef07306ea644e6b5ef0ed3acce4ab602a61e53991b7680039ee83ac89
|
from __future__ import print_function
import numpy as np
from math import sqrt, exp
def tri2full(H_nn, UL='L'):
"""Fill in values of hermitian matrix.
Fill values in lower or upper triangle of H_nn based on the opposite
triangle, such that the resulting matrix is symmetric/hermitian.
UL='U' will copy (conjugated) values from upper triangle into the
lower triangle.
UL='L' will copy (conjugated) values from lower triangle into the
upper triangle.
"""
N, tmp = H_nn.shape
assert N == tmp, 'Matrix must be square'
#assert np.isreal(H_nn.diagonal()).all(), 'Diagonal should be real'
if UL != 'L':
H_nn = H_nn.T
for n in range(N - 1):
H_nn[n, n + 1:] = H_nn[n + 1:, n].conj()
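# --- Hedged usage sketch (not part of the original module) ---
# tri2full works in place: fill only the lower triangle, call it, and the
# upper triangle is mirrored (conjugated for complex input).
def _demo_tri2full():
    H = np.array([[1.0, 0.0],
                  [2.0, 3.0]])
    tri2full(H, UL='L')  # copy the lower triangle into the upper one
    return H  # -> [[1., 2.], [2., 3.]]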
def dagger(matrix):
return np.conj(matrix.T)
def rotate_matrix(h, u):
return np.dot(u.T.conj(), np.dot(h, u))
def get_subspace(matrix, index):
"""Get the subspace spanned by the basis function listed in index"""
assert matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]
return matrix.take(index, 0).take(index, 1)
permute_matrix = get_subspace
def normalize(matrix, S=None):
"""Normalize column vectors.
::
<matrix[:,i]| S |matrix[:,i]> = 1
"""
for col in matrix.T:
if S is None:
col /= np.linalg.norm(col)
else:
col /= np.sqrt(np.dot(col.conj(), np.dot(S, col)))
def subdiagonalize(h_ii, s_ii, index_j):
nb = h_ii.shape[0]
nb_sub = len(index_j)
h_sub_jj = get_subspace(h_ii, index_j)
s_sub_jj = get_subspace(s_ii, index_j)
e_j, v_jj = np.linalg.eig(np.linalg.solve(s_sub_jj, h_sub_jj))
normalize(v_jj, s_sub_jj) # normalize: <v_j|s|v_j> = 1
permute_list = np.argsort(e_j.real)
e_j = np.take(e_j, permute_list)
v_jj = np.take(v_jj, permute_list, axis=1)
#setup transformation matrix
c_ii = np.identity(nb, complex)
for i in range(nb_sub):
for j in range(nb_sub):
c_ii[index_j[i], index_j[j]] = v_jj[i, j]
h1_ii = rotate_matrix(h_ii, c_ii)
s1_ii = rotate_matrix(s_ii, c_ii)
return h1_ii, s1_ii, c_ii, e_j
def cutcoupling(h, s, index_n):
for i in index_n:
s[:, i] = 0.0
s[i, :] = 0.0
s[i, i] = 1.0
Ei = h[i, i]
h[:, i] = 0.0
h[i, :] = 0.0
h[i, i] = Ei
def fermidistribution(energy, kt):
#fermi level is fixed to zero
return 1.0 / (1.0 + np.exp(energy / kt) )
def fliplr(a):
length=len(a)
b = [0] * length
for i in range(length):
b[i] = a[length - i - 1]
return b
def plot_path(energy):
import pylab
pylab.plot(np.real(energy), np.imag(energy), 'b--o')
pylab.show()
def function_integral(function, calcutype):
#return the integral of the 'function' on 'intrange'
#the function can be a value or a matrix, arg1,arg2 are the possible
#parameters of the function
intctrl = function.intctrl
if calcutype == 'eqInt':
intrange = intctrl.eqintpath
tol = intctrl.eqinttol
if hasattr(function.intctrl, 'eqpath_radius'):
radius = function.intctrl.eqpath_radius
else:
radius = -1
if hasattr(function.intctrl, 'eqpath_origin'):
origin = function.intctrl.eqpath_origin
else:
origin = 1000
elif calcutype == 'neInt':
intrange = intctrl.neintpath
tol = intctrl.neinttol
radius = -1
origin = 1000
elif calcutype == 'locInt':
intrange = intctrl.locintpath
tol = intctrl.locinttol
if hasattr(function.intctrl, 'locpath_radius'):
radius = function.intctrl.locpath_radius
else:
radius = -1
if hasattr(function.intctrl, 'locpath_origin'):
origin = function.intctrl.locpath_origin
else:
origin = 1000
trace = 0
a = 0.
b = 1.
#Initialize with 13 function evaluations.
c = (a + b) / 2
h = (b - a) / 2
realmin = 2e-17
s = [.942882415695480, sqrt(2.0/3),
.641853342345781, 1/sqrt(5.0), .236383199662150]
s1 = [0] * len(s)
s2 = [0] * len(s)
for i in range(len(s)):
s1[i] = c - s[i] * h
s2[i] = c + fliplr(s)[i] * h
x0 = [a] + s1 + [c] + s2 + [b]
s0 = [.0158271919734802, .094273840218850, .155071987336585,
.188821573960182, .199773405226859, .224926465333340]
w0 = s0 + [.242611071901408] + fliplr(s0)
w1 = [1, 0, 0, 0, 5, 0, 0, 0, 5, 0, 0, 0, 1]
w2 = [77, 0, 432, 0, 625, 0, 672, 0, 625, 0, 432, 0, 77]
for i in range(len(w1)):
w1[i] = w1[i] / 6.0
w2[i] = w2[i] / 1470.0
dZ = [intrange[:len(intrange) - 1], intrange[1:]]
hmin = [0] * len(dZ[1])
path_type = []
for i in range(len(intrange) - 1):
rs = np.abs(dZ[0][i] - origin)
re = np.abs(dZ[1][i] - origin)
if abs(rs - radius) < 1.0e-8 and abs(re - radius) < 1.0e-8:
path_type.append('half_circle')
else:
path_type.append('line')
for i in range(len(dZ[1])):
if path_type[i] == 'half_circle':
dZ[0][i] = 0
dZ[1][i] = np.pi
for i in range(len(dZ[1])):
dZ[1][i] = dZ[1][i] - dZ[0][i]
hmin[i] = realmin / 1024 * abs(dZ[1][i])
temp = np.array([[1] * 13, x0]).transpose()
Zx = np.dot(temp, np.array(dZ))
Zxx = []
for i in range(len(intrange) - 1):
for j in range(13):
Zxx.append(Zx[j][i])
ns = 0
ne = 12
if path_type[0] == 'line':
yns = function.calgfunc(Zxx[ns], calcutype)
    elif path_type[0] == 'half_circle':
        # evaluate at the start point of the first interval, Zxx[ns]
        energy = origin + radius * np.exp((np.pi - Zxx[ns]) * 1.j)
        yns = -1.j * radius * np.exp(-1.j * Zxx[ns]) * function.calgfunc(energy, calcutype)
fcnt = 0
for n in range(len(intrange)-1):
# below evaluate the integral and adjust the tolerance
Q1pQ0 = yns * (w1[0] - w0[0])
Q2pQ0 = yns * (w2[0] - w0[0])
fcnt = fcnt + 12
for i in range(1,12):
if path_type[n] == 'line':
yne = function.calgfunc(Zxx[ns + i], calcutype)
elif path_type[n] == 'half_circle':
energy = origin + radius * np.exp((np.pi -Zxx[ns + i]) * 1.j)
yne = -1.j * radius * np.exp(-1.j * Zxx[ns + i])* function.calgfunc(energy, calcutype)
Q1pQ0 += yne * (w1[i] - w0[i])
Q2pQ0 += yne * (w2[i] - w0[i])
# Increase the tolerance if refinement appears to be effective
r = np.abs(Q2pQ0) / (np.abs(Q1pQ0) + np.abs(realmin))
dim = np.product(r.shape)
r = np.sum(r) / dim
if r > 0 and r < 1:
thistol = tol / r
else:
thistol = tol
if path_type[n] == 'line':
yne = function.calgfunc(Zxx[ne], calcutype)
elif path_type[n] == 'half_circle':
energy = origin + radius * np.exp((np.pi -Zxx[ne]) * 1.j)
yne = -1.j * radius * np.exp(-1.j * Zxx[ne])* function.calgfunc(energy, calcutype)
#Call the recursive core integrator
Qk, xpk, wpk, fcnt, warn = quadlstep(function, Zxx[ns],
Zxx[ne], yns, yne,
thistol, trace, fcnt,
hmin[n], calcutype, path_type[n],
origin, radius)
if n == 0:
Q = np.copy(Qk)
Xp = xpk[:]
Wp = wpk[:]
else:
Q += Qk
Xp = Xp[:-1] + xpk
Wp = Wp[:-1] + [Wp[-1] + wpk[0]] + wpk[1:]
if warn == 1:
            print('warning: Minimum step size reached, singularity possible')
elif warn == 2:
            print('warning: Maximum function count exceeded; singularity likely')
elif warn == 3:
print('warning: Infinite or Not-a-Number function value encountered')
else:
pass
ns += 13
ne += 13
yns = np.copy(yne)
return Q,Xp,Wp,fcnt
def quadlstep(f, Za, Zb, fa, fb, tol, trace, fcnt, hmin, calcutype,
path_type, origin, radius):
#Gaussian-Lobatto and Kronrod method
#QUADLSTEP Recursive core routine for integral
#input parameters:
# f ---------- function, here we just use the module calgfunc
# to return the value, if wanna use it for
# another one, change it
# Za, Zb ---------- the start and end point of the integral
# fa, fb ---------- the function value on Za and Zb
    # fcnt       ---------- the number of function calls made so far
#output parameters:
# Q ---------- integral
# Xp ---------- selected points
# Wp ---------- weight
    # fcnt       ---------- the number of function calls made so far
maxfcnt = 10000
    # Evaluate integrand five times in the interior of subinterval [a,b]
Zh = (Zb - Za) / 2.0
if abs(Zh) < hmin:
        # Minimum step size reached; singularity possible
Q = Zh * (fa + fb)
if path_type == 'line':
Xp = [Za, Zb]
elif path_type == 'half_circle':
Xp = [origin + radius * np.exp((np.pi - Za) * 1.j),
origin + radius * np.exp((np.pi - Zb) * 1.j)]
Wp = [Zh, Zh]
warn = 1
return Q, Xp, Wp, fcnt, warn
fcnt += 5
if fcnt > maxfcnt:
        #Maximum function count exceeded; singularity likely
Q = Zh * (fa + fb)
if path_type == 'line':
Xp = [Za, Zb]
elif path_type == 'half_circle':
Xp = [origin + radius * np.exp((np.pi - Za) * 1.j),
origin + radius * np.exp((np.pi - Zb) * 1.j)]
Wp = [Zh, Zh]
warn = 2
return Q, Xp, Wp, fcnt, warn
x = [0.18350341907227, 0.55278640450004, 1.0,
         1.44721359549996, 1.81649658092773]
Zx = [0] * len(x)
y = [0] * len(x)
for i in range(len(x)):
x[i] *= 0.5
Zx[i] = Za + (Zb - Za) * x[i]
if path_type == 'line':
y[i] = f.calgfunc(Zx[i], calcutype)
elif path_type == 'half_circle':
energy = origin + radius * np.exp((np.pi - Zx[i]) * 1.j)
y[i] = f.calgfunc(energy, calcutype)
#Four point Lobatto quadrature
s1 = [1.0, 0.0, 5.0, 0.0, 5.0, 0.0, 1.0]
s2 = [77.0, 432.0, 625.0, 672.0, 625.0, 432.0, 77.0]
Wk = [0] * 7
Wp = [0] * 7
for i in range(7):
Wk[i] = (Zh / 6.0) * s1[i]
Wp[i] = (Zh / 1470.0) * s2[i]
if path_type == 'line':
Xp = [Za] + Zx + [Zb]
elif path_type == 'half_circle':
Xp = [Za] + Zx + [Zb]
for i in range(7):
factor = -1.j * radius * np.exp(1.j * (np.pi - Xp[i]))
Wk[i] *= factor
Wp[i] *= factor
Xp[i] = origin + radius * np.exp((np.pi - Xp[i]) * 1.j)
Qk = fa * Wk[0] + fb * Wk[6]
Q = fa * Wp[0] + fb * Wp[6]
for i in range(1, 6):
Qk += y[i-1] * Wk[i]
Q += y[i-1] * Wp[i]
if np.isinf(np.max(np.abs(Q))):
Q = Zh * (fa + fb)
if path_type == 'line':
Xp = [Za, Zb]
elif path_type == 'half_circle':
Xp = [origin + radius * np.exp((np.pi - Za) * 1.j),
origin + radius * np.exp((np.pi - Zb) * 1.j)]
Wp = [Zh, Zh]
warn = 3
return Qk, Xp, Wp, fcnt, warn
else:
pass
if trace:
print(fcnt, np.real(Za), np.imag(Za), np.abs(Zh))
    #Check accuracy of integral over this subinterval
XXk = [Xp[0], Xp[2], Xp[4], Xp[6]]
WWk = [Wk[0], Wk[2], Wk[4], Wk[6]]
YYk = [fa, y[1], y[3], fb]
if np.max(np.abs(Qk - Q)) <= tol:
warn = 0
return Q, XXk, WWk, fcnt, warn
    #Subdivide into three subintervals and recurse
    else:
Q, Xk, Wk, fcnt, warn = quadlstep(f, Za, Zx[1], fa, YYk[1],
tol, trace, fcnt, hmin,
calcutype, path_type,
origin, radius)
Qk, xkk, wkk, fcnt, warnk = quadlstep(f, Zx[1],
Zx[3], YYk[1], YYk[2], tol, trace, fcnt, hmin,
calcutype, path_type,
origin, radius)
Q += Qk
Xk = Xk[:-1] + xkk
Wk = Wk[:-1] + [Wk[-1] + wkk[0]] + wkk[1:]
warn = max(warn, warnk)
Qk, xkk, wkk, fcnt, warnk = quadlstep(f, Zx[3], Zb, YYk[2], fb,
tol, trace, fcnt, hmin,
calcutype, path_type,
origin, radius)
Q += Qk
Xk = Xk[:-1] + xkk
Wk = Wk[:-1] + [Wk[-1] + wkk[0]] + wkk[1:]
warn = max(warn, warnk)
return Q, Xk, Wk, fcnt, warn
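# --- Hedged illustration (not part of the original module) ---
# The nonzero entries of s1 above are the 4-point Gauss-Lobatto weights
# (h/6)*[1, 5, 5, 1] at nodes {a, a+h*(1-1/sqrt(5)), a+h*(1+1/sqrt(5)), b}
# with h=(b-a)/2; the rule is exact for polynomials up to degree 5. A quick
# check on the integral of x**2 over [0, 1] (exact value 1/3):
def _demo_lobatto4(f=lambda x: x ** 2, a=0.0, b=1.0):
    h = (b - a) / 2.0
    c = 1.0 / sqrt(5.0)
    nodes = [a, a + h * (1 - c), a + h * (1 + c), b]
    weights = [h / 6.0 * w for w in (1.0, 5.0, 5.0, 1.0)]
    return sum(w * f(x) for w, x in zip(weights, nodes))  # -> 0.3333...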
def mytextread0(filename):
num = 0
    df = open(filename)
df.seek(0)
for line in df:
if num == 0:
dim = line.strip().split(' ')
row = int(dim[0])
col = int(dim[1])
mat = np.empty([row, col])
else:
data = line.strip().split(' ')
if len(data) == 0 or len(data)== 1:
break
else:
for i in range(len(data)):
mat[num - 1, i] = float(data[i])
num += 1
return mat
def mytextread1(filename):
num = 0
    df = open(filename)
df.seek(0)
data = []
for line in df:
tmp = line.strip()
if len(tmp) != 0:
data.append(float(tmp))
else:
break
dim = int(sqrt(len(data)))
mat = np.empty([dim, dim])
for i in range(dim):
for j in range(dim):
mat[i, j] = data[num]
num += 1
return mat
def mytextwrite1(filename, mat):
num = 0
df = open(filename,'w')
df.seek(0)
dim = mat.shape[0]
if dim != mat.shape[1]:
        print('mytextwrite1: matrix is not square')
for i in range(dim):
for j in range(dim):
df.write('%20.20e\n'% mat[i, j])
df.close()
|
suttond/MODOI
|
ase/transport/tools.py
|
Python
|
lgpl-3.0
| 14,465
|
[
"Gaussian"
] |
2abb984cc2cbedcfe6eb0ffff6cb8d629e127900fd38833c374335dd2f1c0af8
|
########################################################################
# $HeadURL$
# File : SSHComputingElement.py
# Author : Dumitru Laurentiu, A.T.
########################################################################
""" SSH (Virtual) Computing Element: For a given IP/host it will send jobs directly through ssh
"""
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC import S_OK, S_ERROR
from DIRAC import rootPath
from DIRAC import gLogger
import os, urllib
import shutil, tempfile
from types import StringTypes
__RCSID__ = "$Id$"
CE_NAME = 'SSH'
MANDATORY_PARAMETERS = [ 'Queue' ]
class SSH:
""" The SSH interface
"""
def __init__( self, user = None, host = None, password = None, key = None, parameters = {}, options = "" ):
self.user = user
if not user:
self.user = parameters.get( 'SSHUser', '' )
self.host = host
if not host:
self.host = parameters.get( 'SSHHost', '' )
self.password = password
if not password:
self.password = parameters.get( 'SSHPassword', '' )
self.key = key
if not key:
self.key = parameters.get( 'SSHKey', '' )
self.options = options
if not len(options):
self.options = parameters.get( 'SSHOptions', '' )
self.log = gLogger.getSubLogger( 'SSH' )
def __ssh_call( self, command, timeout ):
try:
from DIRAC.Resources.Computing import pexpect
expectFlag = True
except:
from DIRAC.Core.Utilities.Subprocess import shellCall
expectFlag = False
if not timeout:
timeout = 999
if expectFlag:
ssh_newkey = 'Are you sure you want to continue connecting'
try:
child = pexpect.spawn( command, timeout = timeout )
i = child.expect( [pexpect.TIMEOUT, ssh_newkey, pexpect.EOF, 'assword: '] )
if i == 0: # Timeout
return S_OK( ( -1, child.before, 'SSH login failed' ) )
elif i == 1: # SSH does not have the public key. Just accept it.
child.sendline ( 'yes' )
          i = child.expect( [pexpect.TIMEOUT, 'assword: '] )
if i == 0: # Timeout
return S_OK( ( -1, str( child.before ) + str( child.after ), 'SSH login failed' ) )
elif i == 1:
child.sendline( self.password )
child.expect( pexpect.EOF )
return S_OK( ( 0, child.before, '' ) )
elif i == 2:
# Passwordless login, get the output
return S_OK( ( 0, child.before, '' ) )
if self.password:
child.sendline( self.password )
child.expect( pexpect.EOF )
return S_OK( ( 0, child.before, '' ) )
else:
return S_ERROR( ( -2, child.before, '' ) )
      except Exception, x:
        res = ( -1, 'Encountered exception %s: %s' % ( x.__class__.__name__, str( x ) ) )
        return S_ERROR( res )
else:
# Try passwordless login
result = shellCall( timeout, command )
# print ( "!!! SSH command: %s returned %s\n" % (command, result) )
if result['Value'][0] == 255:
return S_ERROR ( ( -1, 'Cannot connect to host %s' % self.host, '' ) )
return result
def sshCall( self, timeout, cmdSeq ):
""" Execute remote command via a ssh remote call
"""
command = cmdSeq
if type( cmdSeq ) == type( [] ):
command = ' '.join( cmdSeq )
key = ''
if self.key:
key = ' -i %s ' % self.key
pattern = "'===><==='"
command = 'ssh -q %s -l %s %s %s "echo %s;%s"' % ( key, self.user, self.host, self.options, pattern, command )
self.log.debug( "SSH command %s" % command )
result = self.__ssh_call( command, timeout )
self.log.debug( "SSH command result %s" % str( result ) )
if not result['OK']:
return result
# Take the output only after the predefined pattern
ind = result['Value'][1].find('===><===')
if ind == -1:
return result
status, output, error = result['Value']
output = output[ind+8:]
if output.startswith('\r'):
output = output[1:]
if output.startswith('\n'):
output = output[1:]
result['Value'] = ( status, output, error )
return result
def scpCall( self, timeout, localFile, destinationPath, upload = True ):
""" Execute scp copy
"""
key = ''
if self.key:
key = ' -i %s ' % self.key
if upload:
command = "scp %s %s %s %s@%s:%s" % ( key, self.options, localFile, self.user, self.host, destinationPath )
else:
command = "scp %s %s %s@%s:%s %s" % ( key, self.options, self.user, self.host, destinationPath, localFile )
self.log.debug( "SCP command %s" % command )
return self.__ssh_call( command, timeout )
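# --- Hedged illustration (not part of DIRAC) ---
# The '===><===' pattern used in sshCall marks where login banners end and
# the real command output begins. A minimal sketch of that splitting logic
# on a canned string:
def _demoPatternSplit():
  sample = 'Welcome banner\r\n===><===\r\nreal output'
  ind = sample.find( '===><===' )
  output = sample[ind + 8:]  # 8 == len( '===><===' )
  if output.startswith( '\r' ):
    output = output[1:]
  if output.startswith( '\n' ):
    output = output[1:]
  return output  # -> 'real output'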
class SSHComputingElement( ComputingElement ):
#############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
ComputingElement.__init__( self, ceUniqueID )
self.ceType = CE_NAME
self.controlScript = 'sshce'
self.submittedJobs = 0
self.mandatoryParameters = MANDATORY_PARAMETERS
#############################################################################
def _addCEConfigDefaults( self ):
"""Method to make sure all necessary Configuration Parameters are defined
"""
# First assure that any global parameters are loaded
ComputingElement._addCEConfigDefaults( self )
# Now batch system specific ones
if 'ExecQueue' not in self.ceParameters:
self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' )
if 'SharedArea' not in self.ceParameters:
      # '.' isn't a good location, move to $HOME
self.ceParameters['SharedArea'] = '$HOME'
if 'BatchOutput' not in self.ceParameters:
self.ceParameters['BatchOutput'] = 'data'
if 'BatchError' not in self.ceParameters:
self.ceParameters['BatchError'] = 'data'
if 'ExecutableArea' not in self.ceParameters:
self.ceParameters['ExecutableArea'] = 'data'
if 'InfoArea' not in self.ceParameters:
self.ceParameters['InfoArea'] = 'info'
if 'WorkArea' not in self.ceParameters:
self.ceParameters['WorkArea'] = 'work'
if 'SubmitOptions' not in self.ceParameters:
self.ceParameters['SubmitOptions'] = '-'
def _reset( self ):
""" Process CE parameters and make necessary adjustments
"""
self.queue = self.ceParameters['Queue']
if 'ExecQueue' not in self.ceParameters or not self.ceParameters['ExecQueue']:
self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' )
self.execQueue = self.ceParameters['ExecQueue']
self.log.info( "Using queue: ", self.queue )
self.sharedArea = self.ceParameters['SharedArea']
self.batchOutput = self.ceParameters['BatchOutput']
if not self.batchOutput.startswith( '/' ):
self.batchOutput = os.path.join( self.sharedArea, self.batchOutput )
self.batchError = self.ceParameters['BatchError']
if not self.batchError.startswith( '/' ):
self.batchError = os.path.join( self.sharedArea, self.batchError )
self.infoArea = self.ceParameters['InfoArea']
if not self.infoArea.startswith( '/' ):
self.infoArea = os.path.join( self.sharedArea, self.infoArea )
self.executableArea = self.ceParameters['ExecutableArea']
if not self.executableArea.startswith( '/' ):
self.executableArea = os.path.join( self.sharedArea, self.executableArea )
self.workArea = self.ceParameters['WorkArea']
if not self.workArea.startswith( '/' ):
self.workArea = os.path.join( self.sharedArea, self.workArea )
result = self._prepareRemoteHost()
self.submitOptions = ''
if 'SubmitOptions' in self.ceParameters:
self.submitOptions = self.ceParameters['SubmitOptions']
self.removeOutput = True
if 'RemoveOutput' in self.ceParameters:
if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']:
self.removeOutput = False
def _prepareRemoteHost(self, host=None ):
""" Prepare remote directories and upload control script
"""
ssh = SSH( host = host, parameters = self.ceParameters )
# Make remote directories
dirTuple = tuple ( uniqueElements( [self.sharedArea,
self.executableArea,
self.infoArea,
self.batchOutput,
self.batchError,
self.workArea] ) )
nDirs = len( dirTuple )
cmd = 'mkdir -p %s; '*nDirs % dirTuple
self.log.verbose( 'Creating working directories on %s' % self.ceParameters['SSHHost'] )
result = ssh.sshCall( 30, cmd )
if not result['OK']:
self.log.warn( 'Failed creating working directories: %s' % result['Message'][1] )
return result
status, output, error = result['Value']
if status == -1:
      self.log.warn( 'Timeout while creating directories' )
      return S_ERROR( 'Timeout while creating directories' )
if "cannot" in output:
self.log.warn( 'Failed to create directories: %s' % output )
return S_ERROR( 'Failed to create directories: %s' % output )
# Upload the control script now
sshScript = os.path.join( rootPath, "DIRAC", "Resources", "Computing", "remote_scripts", self.controlScript )
self.log.verbose( 'Uploading %s script to %s' % ( self.controlScript, self.ceParameters['SSHHost'] ) )
result = ssh.scpCall( 30, sshScript, self.sharedArea )
if not result['OK']:
self.log.warn( 'Failed uploading control script: %s' % result['Message'][1] )
return result
status, output, error = result['Value']
if status != 0:
if status == -1:
self.log.warn( 'Timeout while uploading control script' )
return S_ERROR( 'Timeout while uploading control script' )
else:
self.log.warn( 'Failed uploading control script: %s' % output )
return S_ERROR( 'Failed uploading control script' )
# Chmod the control scripts
self.log.verbose( 'Chmod +x control script' )
result = ssh.sshCall( 10, "chmod +x %s/%s" % ( self.sharedArea, self.controlScript ) )
if not result['OK']:
self.log.warn( 'Failed chmod control script: %s' % result['Message'][1] )
return result
status, output, error = result['Value']
if status != 0:
if status == -1:
self.log.warn( 'Timeout while chmod control script' )
return S_ERROR( 'Timeout while chmod control script' )
else:
        self.log.warn( 'Failed chmod of control script: %s' % output )
        return S_ERROR( 'Failed chmod of control script' )
return S_OK()
def submitJob( self, executableFile, proxy, numberOfJobs = 1 ):
# self.log.verbose( "Executable file path: %s" % executableFile )
if not os.access( executableFile, 5 ):
os.chmod( executableFile, 0755 )
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
if proxy:
self.log.verbose( 'Setting up proxy for payload' )
wrapperContent = bundleProxy( executableFile, proxy )
name = writeScript( wrapperContent, os.getcwd() )
submitFile = name
else: # no proxy
submitFile = executableFile
result = self._submitJobToHost( submitFile, numberOfJobs )
if proxy:
os.remove( submitFile )
return result
def _submitJobToHost( self, executableFile, numberOfJobs, host = None ):
""" Submit prepared executable to the given host
"""
ssh = SSH( host = host, parameters = self.ceParameters )
# Copy the executable
sFile = os.path.basename( executableFile )
result = ssh.scpCall( 10, executableFile, '%s/%s' % ( self.executableArea, os.path.basename( executableFile ) ) )
if not result['OK']:
return result
jobStamps = []
for i in range( numberOfJobs ):
jobStamps.append( makeGuid()[:8] )
jobStamp = '#'.join( jobStamps )
subOptions = urllib.quote( self.submitOptions )
cmd = "bash --login -c '%s/%s submit_job %s/%s %s %s %s %d %s %s %s %s'" % ( self.sharedArea,
self.controlScript,
self.executableArea,
os.path.basename( executableFile ),
self.batchOutput,
self.batchError,
self.workArea,
numberOfJobs,
self.infoArea,
jobStamp,
self.execQueue,
subOptions )
self.log.verbose( 'CE submission command: %s' % cmd )
result = ssh.sshCall( 120, cmd )
if not result['OK']:
self.log.error( '%s CE job submission failed' % self.ceType, result['Message'] )
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
# Examine results of the job submission
submitHost = host
if host is None:
submitHost = self.ceParameters['SSHHost'].split('/')[0]
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job submission: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job submission: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed job submission, reason: %s' % message )
else:
batchIDs = outputLines[1:]
jobIDs = [ self.ceType.lower()+'://'+self.ceName+'/'+id for id in batchIDs ]
else:
return S_ERROR( '\n'.join( [sshStdout, sshStderr] ) )
result = S_OK ( jobIDs )
self.submittedJobs += len( batchIDs )
return result
def killJob( self, jobIDList ):
""" Kill a bunch of jobs
"""
if type( jobIDList ) in StringTypes:
jobIDList = [jobIDList]
return self._killJobOnHost( jobIDList )
def _killJobOnHost( self, jobIDList, host = None ):
""" Kill the jobs for the given list of job IDs
"""
resultDict = {}
ssh = SSH( host = host, parameters = self.ceParameters )
jobDict = {}
for job in jobIDList:
result = pfnparse( job )
if result['OK']:
stamp = result['Value']['FileName']
else:
self.log.error( 'Invalid job id', job )
continue
jobDict[stamp] = job
stampList = jobDict.keys()
cmd = "bash --login -c '%s/%s kill_job %s %s'" % ( self.sharedArea, self.controlScript, '#'.join( stampList ),
self.infoArea )
result = ssh.sshCall( 10, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
# Examine results of the job submission
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job kill: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job kill: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed job kill, reason: %s' % message )
else:
return S_ERROR( '\n'.join( [sshStdout, sshStderr] ) )
return S_OK()
def _getHostStatus( self, host = None ):
""" Get jobs running at a given host
"""
ssh = SSH( host = host, parameters = self.ceParameters )
cmd = "bash --login -c '%s/%s status_info %s %s %s %s'" % ( self.sharedArea,
self.controlScript,
self.infoArea,
self.workArea,
self.ceParameters['SSHUser'],
self.execQueue )
result = ssh.sshCall( 10, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
# Examine results of the job submission
resultDict = {}
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from CE get status: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed to get CE status: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed to get CE status, reason: %s' % message )
else:
for line in outputLines[1:]:
if ':::' in line:
jobStatus, nJobs = line.split( ':::' )
resultDict[jobStatus] = int( nJobs )
else:
return S_ERROR( '\n'.join( [sshStdout, sshStderr] ) )
return S_OK( resultDict )
def getCEStatus( self, jobIDList = None ):
""" Method to return information on running and pending jobs.
"""
result = S_OK()
result['SubmittedJobs'] = self.submittedJobs
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
resultHost = self._getHostStatus()
if not resultHost['OK']:
return resultHost
result['RunningJobs'] = resultHost['Value'].get( 'Running', 0 )
result['WaitingJobs'] = resultHost['Value'].get( 'Waiting', 0 )
self.log.verbose( 'Waiting Jobs: ', result['WaitingJobs'] )
self.log.verbose( 'Running Jobs: ', result['RunningJobs'] )
return result
def getJobStatus( self, jobIDList ):
""" Get the status information for the given list of jobs
"""
return self._getJobStatusOnHost( jobIDList )
def _getJobStatusOnHost( self, jobIDList, host = None ):
""" Get the status information for the given list of jobs
"""
# self.log.verbose( '*** getUnitJobStatus %s - %s\n' % ( jobIDList, host) )
resultDict = {}
ssh = SSH( host = host, parameters = self.ceParameters )
jobDict = {}
for job in jobIDList:
result = pfnparse( job )
if result['OK']:
stamp = result['Value']['FileName']
else:
self.log.error( 'Invalid job id', job )
continue
jobDict[stamp] = job
stampList = jobDict.keys()
cmd = "bash --login -c '%s/%s job_status %s %s %s'" % ( self.sharedArea,
self.controlScript,
'#'.join( stampList ),
self.infoArea,
self.ceParameters['SSHUser'] )
result = ssh.sshCall( 30, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job get status: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job status: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
        return S_ERROR( 'Failed to get job status, reason: %s' % message )
else:
for line in outputLines[1:]:
jbundle = line.split( ':::' )
if ( len( jbundle ) == 2 ):
resultDict[jobDict[jbundle[0]]] = jbundle[1]
else:
return S_ERROR( '\n'.join( [sshStdout, sshStderr] ) )
# self.log.verbose( ' !!! getUnitJobStatus will return : %s\n' % resultDict )
return S_OK( resultDict )
def _getJobOutputFiles( self, jobID ):
""" Get output file names for the specific CE
"""
result = pfnparse( jobID )
if not result['OK']:
return result
jobStamp = result['Value']['FileName']
host = result['Value']['Host']
output = '%s/%s.out' % ( self.batchOutput, jobStamp )
error = '%s/%s.err' % ( self.batchError, jobStamp )
return S_OK( (jobStamp, host, output, error) )
def getJobOutput( self, jobID, localDir = None ):
""" Get the specified job standard output and error files. If the localDir is provided,
the output is returned as file in this directory. Otherwise, the output is returned
as strings.
"""
result = self._getJobOutputFiles(jobID)
if not result['OK']:
return result
jobStamp, host, outputFile, errorFile = result['Value']
self.log.verbose( 'Getting output for jobID %s' % jobID )
if not localDir:
tempDir = tempfile.mkdtemp()
else:
tempDir = localDir
ssh = SSH( parameters = self.ceParameters )
result = ssh.scpCall( 20, '%s/%s.out' % ( tempDir, jobStamp ), '%s' % outputFile, upload = False )
if not result['OK']:
return result
if not os.path.exists( '%s/%s.out' % ( tempDir, jobStamp ) ):
os.system( 'touch %s/%s.out' % ( tempDir, jobStamp ) )
result = ssh.scpCall( 20, '%s/%s.err' % ( tempDir, jobStamp ), '%s' % errorFile, upload = False )
if not result['OK']:
return result
if not os.path.exists( '%s/%s.err' % ( tempDir, jobStamp ) ):
os.system( 'touch %s/%s.err' % ( tempDir, jobStamp ) )
# The result is OK, we can remove the output
if self.removeOutput:
result = ssh.sshCall( 10, 'rm -f %s/%s.out %s/%s.err' % ( self.batchOutput, jobStamp, self.batchError, jobStamp ) )
if localDir:
return S_OK( ( '%s/%s.out' % ( tempDir, jobStamp ), '%s/%s.err' % ( tempDir, jobStamp ) ) )
else:
# Return the output as a string
outputFile = open( '%s/%s.out' % ( tempDir, jobStamp ), 'r' )
output = outputFile.read()
outputFile.close()
outputFile = open( '%s/%s.err' % ( tempDir, jobStamp ), 'r' )
error = outputFile.read()
outputFile.close()
shutil.rmtree( tempDir )
return S_OK( ( output, error ) )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
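# --- Hedged refactoring sketch (not part of DIRAC) ---
# The '============= Start output ===============' parsing above appears
# four times; a shared helper could look like this:
def _parseControlOutput( sshStdout ):
  lines = sshStdout.strip().replace( '\r', '' ).split( '\n' )
  try:
    index = lines.index( '============= Start output ===============' )
    status = int( lines[index + 1] )
  except ( ValueError, IndexError ):
    return None, lines
  return status, lines[index + 2:]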
|
rajanandakumar/DIRAC
|
Resources/Computing/SSHComputingElement.py
|
Python
|
gpl-3.0
| 24,521
|
[
"DIRAC"
] |
ccf241ada80db430381c75678881dce02c0cfb3a1eeb2d1c7644ad9ce9a71f73
|
"""
==========================================================
Compute point-spread functions (PSFs) for MNE/dSPM/sLORETA
==========================================================
PSFs are computed for four labels in the MNE sample data set
for linear inverse operators (MNE, dSPM, sLORETA).
PSFs describe the spread of activation from one label
across the cortical surface.
"""
# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, point_spread_function
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv_eegmeg = (data_path +
'/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
fname_inv_meg = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
data_path + '/MEG/sample/labels/Aud-lh.label',
data_path + '/MEG/sample/labels/Vis-rh.label',
data_path + '/MEG/sample/labels/Vis-lh.label']
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# read inverse operators
inverse_operator_eegmeg = read_inverse_operator(fname_inv_eegmeg)
inverse_operator_meg = read_inverse_operator(fname_inv_meg)
# read label(s)
labels = [mne.read_label(ss) for ss in fname_label]
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE'  # can be 'MNE', 'dSPM' or 'sLORETA'
mode = 'svd'
n_svd_comp = 1
stc_psf_eegmeg, _ = point_spread_function(
inverse_operator_eegmeg, forward, method=method, labels=labels,
lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
stc_psf_meg, _ = point_spread_function(
inverse_operator_meg, forward, method=method, labels=labels,
lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
# save for viewing in mne_analyze in order of labels in 'labels'
# last sample is average across PSFs
# stc_psf_eegmeg.save('psf_eegmeg')
# stc_psf_meg.save('psf_meg')
time_label = "EEGMEG %d"
brain_eegmeg = stc_psf_eegmeg.plot(hemi='rh', subjects_dir=subjects_dir,
time_label=time_label,
figure=mlab.figure(size=(500, 500)))
time_label = "MEG %d"
brain_meg = stc_psf_meg.plot(hemi='rh', subjects_dir=subjects_dir,
time_label=time_label,
figure=mlab.figure(size=(500, 500)))
# The PSF is centred around the right auditory cortex label,
# but clearly extends beyond it.
# It also contains "sidelobes" or "ghost sources"
# in middle/superior temporal lobe.
# For the Aud-RH example, MEG and EEGMEG do not seem to differ a lot,
# but the addition of EEG still decreases point-spread to distant areas
# (e.g. to ATL and IFG).
# The chosen labels are quite far apart from each other, so their PSFs
# do not overlap (check in mne_analyze)
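# A companion analysis (hedged pointer, not executed here): cross-talk
# functions quantify leakage *into* each label; in this era of MNE they
# lived next to point_spread_function, roughly:
# from mne.minimum_norm import cross_talk_function
# stc_ctf, _ = cross_talk_function(inverse_operator_eegmeg, forward, labels,
#                                  method=method, lambda2=lambda2,
#                                  mode=mode, n_svd_comp=n_svd_comp)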
|
adykstra/mne-python
|
examples/inverse/plot_mne_point_spread_function.py
|
Python
|
bsd-3-clause
| 3,134
|
[
"Mayavi"
] |
3202c45848a8aece8580c5dfc1ec9e0a70c7a407f87acfce35c1b8631f84bbae
|
# -*- coding: utf-8 -*-
"""
Kay framework.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
Ian Lewis <IanMLewis@gmail.com>
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import logging
import settings
__version__ = "2.0.0qa1"
__version_info__ = (2, 0, 0, 'qa', 1)
KAY_DIR = os.path.abspath(os.path.dirname(__file__))
LIB_DIR = os.path.join(KAY_DIR, 'lib')
PROJECT_DIR = os.path.abspath(os.path.dirname(settings.__file__))
PROJECT_LIB_DIR = os.path.join(PROJECT_DIR, 'lib')
def setup_env(manage_py_env=False):
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError, e:
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
'/usr/local/google_appengine']
for path in os.environ.get('PATH', '').replace(';', ':').split(':'):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
prefix = '%(PROGRAMFILES)s' % os.environ
paths.append(prefix + r'\Google\google_appengine')
# Loop through all possible paths and look for the SDK dir.
SDK_PATH = None
for sdk_path in paths:
sdk_path = os.path.realpath(sdk_path)
if os.path.exists(sdk_path):
SDK_PATH = sdk_path
break
if SDK_PATH is None:
# The SDK could not be found in any known location.
sys.stderr.write('The Google App Engine SDK could not be found!\n'
'Please visit http://kay-docs.shehas.net/'
' for installation instructions.\n')
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
SDK_PATH = os.path.realpath(SDK_PATH)
# if SDK_PATH points to a file, it could be a zip file.
if os.path.isfile(SDK_PATH):
import zipfile
gae_zip = zipfile.ZipFile(SDK_PATH)
lib_prefix = os.path.join('google_appengine', 'lib')
lib = os.path.join(SDK_PATH, lib_prefix)
pkg_names = []
# add all packages archived under lib in SDK_PATH zip.
for filename in sorted(e.filename for e in gae_zip.filelist):
# package should have __init__.py
if (filename.startswith(lib_prefix) and
filename.endswith('__init__.py')):
pkg_path = filename.replace(os.sep+'__init__.py', '')
# True package root should have __init__.py in upper directory,
# thus we can treat only the shortest unique path as package root.
for pkg_name in pkg_names:
if pkg_path.startswith(pkg_name):
break
else:
pkg_names.append(pkg_path)
# insert populated EXTRA_PATHS into sys.path.
EXTRA_PATHS = ([os.path.dirname(os.path.join(SDK_PATH, pkg_name))
for pkg_name in pkg_names]
+ [os.path.join(SDK_PATH, 'google_appengine')])
sys.path = EXTRA_PATHS + sys.path
# tweak dev_appserver so to make zipimport and templates work well.
from google.appengine.tools import dev_appserver
# make GAE SDK to grant opening library zip.
dev_appserver.FakeFile.ALLOWED_FILES.add(SDK_PATH)
template_dir = 'google_appengine/templates/'
dev_appserver.ApplicationLoggingHandler.InitializeTemplates(
gae_zip.read(template_dir+dev_appserver.HEADER_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.SCRIPT_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.MIDDLE_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.FOOTER_TEMPLATE))
# ... else it could be a directory.
else:
EXTRA_PATHS = [SDK_PATH]
lib = os.path.join(SDK_PATH, 'lib')
# Automatically add all packages in the SDK's lib folder:
for dir in os.listdir(lib):
# SDK 1.4.2 introduced Django 1.2, and renamed django to django_0_96
if dir == 'django_0_96':
EXTRA_PATHS.append(os.path.join(lib, dir))
continue
path = os.path.join(lib, dir)
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(path, dir), os.path.join(path, 'lib', dir))
for path in detect:
if os.path.isdir(path):
EXTRA_PATHS.append(os.path.dirname(path))
break
sys.path = EXTRA_PATHS + sys.path
# corresponds with another google package
if sys.modules.has_key('google'):
del sys.modules['google']
from google.appengine.api import apiproxy_stub_map
setup()
if not manage_py_env:
return
print 'Running on Kay-%s' % __version__
def setup():
setup_syspath()
from kay.conf import settings
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
if getattr(settings, 'ADD_APP_PREFIX_TO_KIND', True) and settings.DEBUG:
    logging.info(
      "ADD_APP_PREFIX_TO_KIND is deprecated. Please add a kind() method "
      "to your models that returns the correct name for the entity."
    )
class _meta(object):
__slots__ = ('object_name', 'app_label', 'module_name', '_db_table',
'abstract')
def __init__(self, model):
try:
self.app_label = model.__module__.split('.')[-2]
except IndexError:
logging.warn('Kay expects models (here: %s.%s) to be defined in their'
' own apps!' % (model.__module__, model.__name__))
self.app_label = None
self.module_name = model.__name__.lower()
self.abstract = model is db.Model
self.object_name = model.__name__
def _set_db_table(self, db_table):
self._db_table = db_table
def _get_db_table(self):
if getattr(settings, 'ADD_APP_PREFIX_TO_KIND', True):
if hasattr(self, '_db_table'):
return self._db_table
return '%s_%s' % (self.app_label, self.module_name)
return self.object_name
db_table = property(_get_db_table, _set_db_table)
def _initialize_model(cls):
cls._meta = _meta(cls)
old_propertied_class_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs, map_kind=True):
"""
Just add _meta to db.Model.
"""
_initialize_model(cls)
old_propertied_class_init(cls, name, bases, attrs,
not cls._meta.abstract)
db.PropertiedClass.__init__ = __init__
old_poly_init = polymodel.PolymorphicClass.__init__
def __init__(cls, name, bases, attrs):
if polymodel.PolyModel not in bases:
_initialize_model(cls)
old_poly_init(cls, name, bases, attrs)
polymodel.PolymorphicClass.__init__ = __init__
@classmethod
def kind(cls):
return cls._meta.db_table
db.Model.kind = kind
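# --- Hedged illustration (not part of Kay) ---
# With ADD_APP_PREFIX_TO_KIND enabled, a model class Entry defined in
# myapp/models.py is stored under the datastore kind 'myapp_entry'; the
# naming rule used by _meta above boils down to:
def _demo_kind_name(app_label='myapp', module_name='entry'):
  return '%s_%s' % (app_label, module_name)  # -> 'myapp_entry'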
def setup_syspath():
if not PROJECT_DIR in sys.path:
sys.path = [PROJECT_DIR] + sys.path
if not LIB_DIR in sys.path:
sys.path = [LIB_DIR] + sys.path
if not PROJECT_LIB_DIR in sys.path:
sys.path = [PROJECT_LIB_DIR] + sys.path
|
Letractively/kay-framework
|
kay/__init__.py
|
Python
|
bsd-3-clause
| 7,280
|
[
"VisIt"
] |
d6eec0376ad436b3692143f4579f0dc2fb36450782e700340caf93d9538c8c74
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
image = mpimg.imread('exit-ramp.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) #grayscale conversion
# Define a kernel size for Gaussian smoothing / blurring
# Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)
# Define parameters for Canny and run it
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Next we'll create a masked edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
imshape = image.shape
#vertices = np.array([[(0,imshape[0]),(0, 0), (imshape[1], 0), (imshape[1],imshape[0])]], dtype=np.int32)
vertices = np.array([[(0,imshape[0]),(imshape[1]*0.25, imshape[0]*0.45), (imshape[1]*0.75, imshape[0]*0.45), (imshape[1],imshape[0])]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_edges = cv2.bitwise_and(edges, mask)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
# rho and theta are the distance and angular resolution of our grid in Hough space
# in Hough space, we have a grid laid out along the (theta, rho) axes
# rho is specified in units of pixels
rho = 5
# theta is specified in radians
theta = (np.pi/180)*1
# threshold is the number of intersections in Hough space needed to accept a line
threshold = 200
# min_line_length is the minimum acceptable pixel length of a line in the output
min_line_length = 80
# max_line_gap is the maximum acceptable pixel gap between segments joined into one line
max_line_gap = 5
line_image = np.copy(image)*0 #creating a blank to draw lines on
# Run Hough on edge detected image
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
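# Note: cv2.HoughLinesP() returns None when no segment clears the threshold;
# on other inputs an "if lines is not None:" guard would be needed before the loop below.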
# Iterate over the output "lines" and draw lines on the blank
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((edges, edges, edges))
# Draw the lines on the edge image
combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(combo)
plt.show()
|
DavidObando/carnd
|
Term1/Project1/Other Material/exitramp.py
|
Python
|
apache-2.0
| 2,407
|
[
"Gaussian"
] |
28dd4fbc345b004e7a6ea2bc228443f78a209477d507ab51c3bd6b1e24238c17
|
# [1] https://www.sciencedirect.com/science/article/pii/S0166128098004758
# https://doi.org/10.1016/S0166-1280(98)00475-8
# Dapprich, Frisch, 1998
# [2] https://onlinelibrary.wiley.com/doi/abs/10.1002/9783527629213.ch2
# Clemente, Frisch, 2010
#
# Not implemented in pysisyphus
#
# [2] https://aip.scitation.org/doi/pdf/10.1063/1.2814164?class=pdf
# QM/QM ONIOM EE based on Mulliken charges
# Hratchian, Raghavachari, 2008
# [3] https://aip.scitation.org/doi/full/10.1063/1.3315417
# QM/QM ONIOM EE based on Löwdin charges
# Mayhall, Hratchian, 2010
# [4] https://www.frontiersin.org/articles/10.3389/fchem.2018.00089/full
# Overview on hybrid methods
# [5] https://doi.org/10.1021/jp0446332
# Electronic embedding charge redistribution
# Lin, Truhlar 2004
# Excited state ONIOM
# [6] https://aip.scitation.org/doi/pdf/10.1063/1.4972000?class=pdf
# [7] https://pubs.rsc.org/en/content/articlehtml/2012/pc/c2pc90007f
import itertools as it
import logging
from collections import namedtuple
import numpy as np
import scipy.sparse as sparse
from pysisyphus.calculators import (
Composite,
Gaussian16,
OpenMolcas,
ORCA,
ORCA5,
Psi4,
Turbomole,
XTB,
PyXTB,
)
from pysisyphus.calculators.Calculator import Calculator
from pysisyphus.elem_data import COVALENT_RADII as CR
from pysisyphus.Geometry import Geometry
from pysisyphus.helpers_pure import full_expand
from pysisyphus.intcoords.setup import get_bond_sets
from pysisyphus.intcoords.setup_fast import get_bond_vec_getter
from pysisyphus.wrapper.jmol import render_geom_and_charges
CALC_DICT = {
"composite": Composite,
"g16": Gaussian16,
"openmolcas": OpenMolcas.OpenMolcas,
"orca": ORCA.ORCA,
"orca5": ORCA5.ORCA5,
"psi4": Psi4,
"turbomole": Turbomole,
"xtb": XTB.XTB,
# "pypsi4": PyPsi4,
"pyxtb": PyXTB.PyXTB,
}
try:
from pysisyphus.calculators.PySCF import PySCF
CALC_DICT["pyscf"] = PySCF
except ImportError:
# print("Error importing PySCF in ONIOMv2")
pass
Link = namedtuple("Link", "ind parent_ind atom g")
def get_g_value(atom, parent_atom, link_atom):
cr, pcr, lcr = [CR[a.lower()] for a in (atom, parent_atom, link_atom)]
# Ratio of (CR_atom + CR_link) to (CR_atom + CR_parent_atom).
# See [1] Sect. 2.2 page 5.
g = (cr + lcr) / (cr + pcr)
return g
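# Worked example (assuming typical covalent radii, roughly C 0.76 A and H 0.31 A;
# the exact values come from COVALENT_RADII): capping a broken C-C bond with H gives
#   g = (0.76 + 0.31) / (0.76 + 0.76) ~ 0.70,
# close to the g=0.709 default used by cap_fragment() below.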
def cap_fragment(atoms, coords, fragment, link_atom="H", g=0.709):
coords3d = coords.reshape(-1, 3)
fragment_set = set(fragment)
# Determine bond(s) that connect fragment with the rest
bonds = get_bond_sets(atoms, coords3d)
bond_sets = [set(b) for b in bonds]
# Find all bonds that involve one atom of model. These bonds
# connect the model to the real geometry. We want to cap these
# bonds.
break_bonds = [b for b in bond_sets if len(b & fragment_set) == 1]
# Put capping atoms at every bond to break.
# The model fragment size corresponds to the length of the union of
# the model set and the atoms in break_bonds.
capped_frag = fragment_set.union(*break_bonds)
capped_inds = list(sorted(capped_frag))
# Index map between the new model geometry and the original indices
# in the real geometry.
atom_map = {
model_ind: real_ind
for real_ind, model_ind in zip(capped_inds, range(len(capped_inds)))
}
links = list()
for bb in break_bonds:
to_cap = bb - fragment_set
assert len(to_cap) == 1
ind = list(bb - to_cap)[0]
parent_ind = tuple(to_cap)[0]
if g is None:
g = get_g_value(atoms[ind], atoms[parent_ind], link_atom)
link = Link(ind=ind, parent_ind=parent_ind, atom=link_atom, g=g)
links.append(link)
return atom_map, links
def atom_inds_to_cart_inds(atom_inds):
stencil = np.array((0, 1, 2), dtype=int)
size_ = len(atom_inds)
cart_inds = np.tile(stencil, size_) + np.repeat(atom_inds, 3) * 3
return cart_inds
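# Example: atom_inds = [1, 4] yields Cartesian indices [3, 4, 5, 12, 13, 14],
# i.e. the x/y/z components of atoms 1 and 4 in a flattened coordinate array.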
class ModelDummyCalc:
def __init__(self, model): # , all_atoms, all_coords):
self.model = model
def get_energy(self, atoms, coords):
energy = self.model.get_energy(atoms, coords, cap=False)
results = {"energy": energy}
return results
def get_forces(self, atoms, coords):
energy, forces = self.model.get_forces(atoms, coords, cap=False)
forces_ = np.zeros((len(atoms), 3))
forces_[: len(atoms) - len(self.model.links)] = forces.reshape(-1, 3)[
self.model.atom_inds
]
results = {"energy": energy, "forces": forces_.flatten()}
return results
# def get_hessian(self, atoms, coords):
# energy, hessian = self.model.get_hessian(atoms, coords, cap=False)
# results = {"energy": energy, "hessian": hessian}
# return results
class Model:
def __init__(
self,
name,
calc_level,
calc,
parent_name,
parent_calc_level,
parent_calc,
atom_inds,
parent_atom_inds,
use_link_atoms=True,
):
self.name = name
self.calc_level = calc_level
self.calc = calc
self.parent_name = parent_name
self.parent_calc_level = parent_calc_level
self.parent_calc = parent_calc
self.atom_inds = list(atom_inds)
# parent_atom_inds may be None
try:
self.parent_atom_inds = list(parent_atom_inds)
except TypeError:
self.parent_atom_inds = None
self.use_link_atoms = use_link_atoms
self.links = list()
self.capped = False
self.J = None
def log(self, message=""):
logger = logging.getLogger("calculator")
logger.debug(self.__str__() + " " + message)
def create_links(self, atoms, coords, debug=False):
self.capped = True
if self.use_link_atoms and self.parent_name is not None:
_, self.links = cap_fragment(atoms, coords, self.atom_inds)
self.capped_atom_num = len(self.atom_inds) + len(self.links)
for i, link in enumerate(self.links):
ind, parent_ind = link.ind, link.parent_ind
self.log(
f"\tCreated Link atom ({link.atom}) between {atoms[ind]}{ind} "
f"and {atoms[parent_ind]}{parent_ind} (g={link.g:.6f})"
)
if len(self.links) == 0:
self.log("Didn't create any link atoms!\n")
# self.J = self.get_jacobian()
self.J = self.get_sparse_jacobian()
if debug:
catoms, ccoords = self.capped_atoms_coords(atoms, coords)
geom = Geometry(catoms, ccoords)
geom.jmol()
def capped_atoms_coords(self, all_atoms, all_coords):
assert self.capped, "Did you forget to call create_links()?"
org_atom_num = len(self.atom_inds)
c3d = all_coords.reshape(-1, 3)
capped_atoms = [all_atoms[i] for i in self.atom_inds]
# Initialize empty coordinate array
capped_coords = np.zeros((self.capped_atom_num, 3))
# Copy non-capped coordinates
capped_coords[:org_atom_num] = c3d[self.atom_inds]
for i, link in enumerate(self.links):
capped_atoms.append(link.atom)
r1 = c3d[link.ind]
r3 = c3d[link.parent_ind]
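# Place the link atom on the ind -> parent_ind bond vector at fraction g;
# with the default g=0.709 the cap sits ~71% of the way toward the removed parent atom.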
r2 = r1 + link.g * (r3 - r1)
capped_coords[org_atom_num + i] = r2
return capped_atoms, capped_coords
def create_bond_vec_getters(self, atoms):
link_parent_inds = [link.parent_ind for link in self.links]
no_bonds_with = [
[
link.ind,
]
for link in self.links
]
self.log(
f"Model has {len(link_parent_inds)} link atom hosts: {link_parent_inds}"
)
covalent_radii = [CR[atom.lower()] for atom in atoms]
self.get_bond_vecs = get_bond_vec_getter(
atoms,
covalent_radii,
link_parent_inds,
no_bonds_with,
)
def get_jacobian(self):
try:
# Shape of Jacobian is (model + link, real). TypeError will be raised
# when self.parent_atom_inds is None.
jac_shape = (
len(self.atom_inds) * 3 + len(self.links) * 3,
len(self.parent_atom_inds) * 3,
)
except TypeError:
return None
J = np.zeros(jac_shape)
# Stencil for diagonal elements of 3x3 submatrix
stencil = np.array((0, 1, 2), dtype=int)
size_ = len(self.atom_inds)
model_rows = np.arange(size_ * 3)
# When more than two layers are present the inner layers aren't directly
# embedded in the outermost layer. This means parent_inds does not begin
# at 0, but with a higher index. So we need a map of the actual indices
# (not starting at 0) to the indices in the Jacobian which start at 0.
atom_inds = [self.parent_atom_inds.index(ind) for ind in self.atom_inds]
ind_map = {k: v for k, v in zip(self.atom_inds, atom_inds)}
model_cols = atom_inds_to_cart_inds(atom_inds)
J[model_rows, model_cols] = 1
# Link atoms
link_start = model_rows.max() + 1
for i, (ind, parent_ind, atom, g) in enumerate(self.links):
rows = link_start + i * 3 + stencil
cols = ind_map[ind] * 3 + stencil
J[rows, cols] = 1 - g
try:
parent_cols = self.parent_atom_inds.index(parent_ind) * 3 + stencil
J[rows, parent_cols] = g
# Raised when link atom is not coupled to layer above, but
# to a layer higher above.
except ValueError:
pass
return J
def get_sparse_jacobian(self):
try:
# Shape of Jacobian is (model + link, real). TypeError will be raised
# when self.parent_atom_inds is None.
jac_shape = (
len(self.atom_inds) * 3 + len(self.links) * 3,
len(self.parent_atom_inds) * 3,
)
except TypeError:
return None
# Stencil for diagonal elements of 3x3 submatrix
stencil = np.array((0, 1, 2), dtype=int)
ones = np.ones_like(stencil)
size_ = len(self.atom_inds)
model_rows = np.arange(size_ * 3)
# When more than two layers are present the inner layers aren't directly
# embedded in the outermost layer. This means parent_inds does not begin
# at 0, but with a higher index. So we need a map of the actual indices
# (not starting at 0) to the indices in the Jacobian which start at 0.
atom_inds = [self.parent_atom_inds.index(ind) for ind in self.atom_inds]
ind_map = {k: v for k, v in zip(self.atom_inds, atom_inds)}
model_cols = atom_inds_to_cart_inds(atom_inds)
jac_rows = model_rows.tolist()
jac_cols = model_cols.tolist()
jac_data = np.ones_like(jac_cols).tolist()
# Link atoms
link_start = model_rows.max() + 1
for i, (ind, parent_ind, atom, g) in enumerate(self.links):
rows = (link_start + i * 3 + stencil).tolist()
cols = (ind_map[ind] * 3 + stencil).tolist()
jac_rows += rows
jac_cols += cols
jac_data += (ones - g).tolist()
try:
parent_cols = (self.parent_atom_inds.index(parent_ind) * 3 + stencil).tolist()
jac_rows += rows
jac_cols += parent_cols
jac_data += np.full_like(parent_cols, g, dtype=float).tolist()
# Raised when link atom is not coupled to layer above, but
# to a layer higher above.
except ValueError:
pass
J = sparse.csr_matrix((jac_data, (jac_rows, jac_cols)), shape=jac_shape)
return J
def get_energy(
self, atoms, coords, point_charges=None, parent_correction=True, cap=True
):
self.log("Energy calculation")
if cap:
catoms, ccoords = self.capped_atoms_coords(atoms, coords)
else:
catoms = atoms
ccoords = coords
prepare_kwargs = {
"point_charges": point_charges,
}
self.log("Calculation at layer level")
results = self.calc.get_energy(catoms, ccoords, **prepare_kwargs)
energy = results["energy"]
# Calculate correction if parent layer is present and it is requested
if (self.parent_calc is not None) and parent_correction:
self.log("Calculation at parent layer level")
parent_results = self.parent_calc.get_energy(
catoms, ccoords, **prepare_kwargs
)
parent_energy = parent_results["energy"]
energy -= parent_energy
elif not parent_correction:
self.log("No parent correction!")
return energy
def get_forces(
self, atoms, coords, point_charges=None, parent_correction=True, cap=True
):
self.log("Force calculation")
# catoms, ccoords = self.capped_atoms_coords(atoms, coords)
if cap:
catoms, ccoords = self.capped_atoms_coords(atoms, coords)
else:
catoms = atoms
ccoords = coords
prepare_kwargs = {
"point_charges": point_charges,
}
self.log("Calculation at layer level")
results = self.calc.get_forces(catoms, ccoords, **prepare_kwargs)
forces = results["forces"]
energy = results["energy"]
if self.J is not None:
# forces = forces.dot(self.J)
# f^T J = (J^T f)^T
# The transpose of the term in brackets can be ignored here, as numpy
# does not distinguish between f and f^T for a 1d-array.
forces = self.J.T @ forces
# Calculate correction if parent layer is present and it is requested
if (self.parent_calc is not None) and parent_correction:
self.log("Calculation at parent layer level")
parent_results = self.parent_calc.get_forces(
catoms, ccoords, **prepare_kwargs
)
parent_forces = parent_results["forces"]
parent_energy = parent_results["energy"]
# Correct energy and forces
energy -= parent_energy
forces -= self.J.T @ parent_forces
elif not parent_correction:
self.log("No parent correction!")
return energy, forces
def get_hessian(
self, atoms, coords, point_charges=None, parent_correction=True, cap=True
):
self.log("Hessian calculation")
# catoms, ccoords = self.capped_atoms_coords(atoms, coords)
if cap:
catoms, ccoords = self.capped_atoms_coords(atoms, coords)
else:
catoms = atoms
ccoords = coords
prepare_kwargs = {
"point_charges": point_charges,
}
self.log("Calculation at layer level")
# results = self.calc.get_hessian(catoms, ccoords, prepare_kwargs)
results = self.calc.get_hessian(catoms, ccoords, **prepare_kwargs)
hessian = results["hessian"]
energy = results["energy"]
if self.J is not None:
# hessian = self.J.T.dot(hessian.dot(self.J))
hessian = (self.J.T @ hessian) @ self.J
# Calculate correction if parent layer is present and it is requested
if (self.parent_calc is not None) and parent_correction:
self.log("Calculation at parent layer level")
parent_results = self.parent_calc.get_hessian(catoms, ccoords, **prepare_kwargs)
parent_hessian = parent_results["hessian"]
parent_energy = parent_results["energy"]
# Correct energy and hessian
energy -= parent_energy
# hessian -= self.J.T.dot(parent_hessian.dot(self.J))
hessian -= (self.J.T @ parent_hessian) @ self.J
elif not parent_correction:
self.log("No parent correction!")
return energy, hessian
# def get_delta_S(self, atoms, coords):
# self.log("ΔS calculation")
# catoms, ccoords = self.capped_atoms_coords(atoms, coords)
# # Parent calculator
# E_parent_real = self.parent_calc.get_energy(atoms, coords)["energy"]
# self.parent_calc.reset()
# E_parent_model = self.parent_calc.get_energy(catoms, ccoords)["energy"]
# S_low = E_parent_real - E_parent_model
# self.log(f"S_low={S_low:.6f} au")
# print(f"S_low={S_low:.6f} au")
# # High level calculator
# E_high_real = self.calc.get_energy(atoms, coords)["energy"]
# self.calc.reset()
# E_high_model = self.calc.get_energy(catoms, ccoords)["energy"]
# S_high = E_high_real - E_high_model
# self.log(f"S_high={S_high:.6f} au")
# print(f"S_high={S_high:.6f} au")
# delta_S = S_low - S_high
# self.log(f"ΔS={delta_S:.6f} au")
# print(f"ΔS={delta_S:.6f} au")
# return delta_S
def parse_charges(self):
charges = self.calc.parse_charges()
try:
parent_charges = self.parent_calc.parse_charges()
except AttributeError:
parent_charges = None
return charges, parent_charges
def as_geom(self, all_atoms, all_coords):
capped_atoms, capped_coords3d = self.capped_atoms_coords(all_atoms, all_coords)
geom = Geometry(capped_atoms, capped_coords3d)
dummy_calc = ModelDummyCalc(self)
geom.set_calculator(dummy_calc)
return geom
def __str__(self):
return (
f"Model({self.name}, {len(self.atom_inds)} atoms, "
f"level={self.calc_level}, parent_level={self.parent_calc_level})"
)
def __repr__(self):
return self.__str__()
def get_embedding_charges(embedding, layer, parent_layer, coords3d):
# Only consider charges that belong to atoms in the parent
# layer. Otherwise this would result in additional charges at
# the same positions as the atoms we would like to calculate.
if "electronic" in embedding:
assert (
len(parent_layer) == 1
), "Multicenter ONIOM in intermediate layer is not supported!"
parent_model = parent_layer[0]
parent_inds = parent_model.atom_inds
point_charges, _ = parent_model.parse_charges()
layer_inds = set(*it.chain([model.atom_inds for model in layer]))
# Determine indices of atoms that are in the parent layer, but
# not in the current layer
only_parent_inds = list(set(parent_inds) - layer_inds)
del_charge_inds = list()
all_redist_coords_charges = list()
# Here, redistributed and scaled charges are calculated. In the EE-RC and EE-RCD
# schemes the link atom parent (LAP) charges are divided by the number of bonds
# connected to the LAP minus 1. They are put halfway along these bonds.
# See [5] for a discussion.
# EE-RC and EE-RCD are very similar. The block below handles calculations that
# are common to both methods, e.g. calculation of the redistributed charges and
# their coordinates.
#
# This will be executed for 'electronic_rc' and 'electronic_rcd'
if "electronic_rc" in embedding:
# Collect charges for models in a layer, e.g., for multicenter ONIOM.
for model in layer:
redist_coords_charges = list()
single_redist_charges = list()
# Determine bonds, connected to link parent.
link_host_bond_vecs, bonded_inds = model.get_bond_vecs(
coords3d, return_bonded_inds=True
)
# Determine link atoms
links = model.links
for link, bond_vecs in zip(links, link_host_bond_vecs):
parent_ind = link.parent_ind
# Presence of a link atom implies a bond.
assert len(bond_vecs) > 0
# *parent_coords, parent_charge = point_charges[link.parent_ind]
# parent_charge = ee_charges[parent_ind]
parent_charge = point_charges[parent_ind]
parent_coords = coords3d[parent_ind]
bond_num = len(bond_vecs)
redist_charge = parent_charge / bond_num
single_redist_charges.append(redist_charge)
# Put modified charges halfway on the bonds
redist_coords = parent_coords + bond_vecs / 2
redist_coords_charges.extend(
[(*coords, redist_charge) for coords in redist_coords]
)
del_charge_inds.append(parent_ind)
redist_coords_charges = np.array(redist_coords_charges)
# Redistributed charges and dipoles to preserve the M1-M2 bond dipoles. See [5].
if embedding == "electronic_rcd":
# Multiply all redistributed charges by 2
redist_coords_charges[:, -1] *= 2
# Subtract the original redistributed charge from the M2 charges
for binds, src in zip(bonded_inds, single_redist_charges):
point_charges[binds] -= src
# Gather redistributed charges of separate models (centers)
all_redist_coords_charges.extend(redist_coords_charges)
assert len(del_charge_inds) == len(set(del_charge_inds)), (
"It seems that one parent hosts multiple link atoms. I did not think about "
"cases like that yet!"
)
# Only keep charges that are not on link atom hosts/parents
keep_mask = [opi for opi in only_parent_inds if opi not in del_charge_inds]
kept_point_charges = point_charges[keep_mask]
kept_coords3d = coords3d[keep_mask]
kept_coords_point_charges = np.concatenate(
(kept_coords3d, kept_point_charges[:, None]), axis=1
)
# Join unmodified charges and redistributed charges
if len(all_redist_coords_charges) > 0:
kept_coords_point_charges = np.concatenate(
(kept_coords_point_charges, all_redist_coords_charges), axis=0
)
return kept_coords_point_charges
class ONIOM(Calculator):
embeddings = {
"": "",
"electronic": "Electronic embedding",
"electronic_rc": "Electronic embedding with redistributed charges",
"electronic_rcd": "Electronic embedding with redistributed charges and dipoles",
}
def __init__(
self,
calcs,
models,
geom,
layers=None,
embedding="",
real_key="real",
use_link_atoms=True,
*args,
**kwargs,
):
"""
layer: list of models
len(layer) == 1: normal ONIOM, len(layer) > 1: multicenter ONIOM.
model:
(sub)set of all atoms that resides in a certain layer and has
a certain calculator.
"""
super().__init__(*args, **kwargs)
if embedding is None:
embedding = ""
assert (
embedding in self.embeddings.keys()
), f"Valid embeddings are: {self.embeddings.keys()}"
self.embedding = embedding
assert real_key not in models, f'"{real_key}" must not be defined in "models"!'
assert real_key in calcs, f'"{real_key}" must be defined in "calcs"!'
self.use_link_atoms = use_link_atoms
# Expand index-lists in models
for model in models.values():
if ".." in model["inds"]:
model["inds"] = full_expand(model["inds"])
# When no ordering of layers is given we try to guess it from
# the size of the respective models. It's probably a better idea
# to always specify the layer ordering though ;)
if layers is None:
self.log(
"No explicit layer ordering specified! Determining layer "
"hierarchy from model sizes. This does not support multi-"
"center ONIOM!"
)
as_list = [(key, val) for key, val in models.items()]
# Determine hierarchy of models, from biggest to smallest model
layers = [
key
for key, val in sorted(
as_list, key=lambda model: -len(model[1]["inds"])
)
]
assert real_key not in layers, f'"{real_key}" must not be defined in "layers"!'
############
# #
# LAYERS #
# #
############
# Add real model and layer as they are missing right now. The real
# layer is always the last layer. The real layer is always calculated
# by the 'real_key' calculator.
layers = [real_key] + layers
models[real_key] = {
"calc": real_key,
"inds": list(range(len(geom.atoms))),
}
self.log(f"Layer-ordering from big to small: {layers}")
# Single-model layers will be given as strings. As we also support
# multicenter-ONIOM there may also be layers that are given as lists
# that contain multiple models per layer.
# Now we convert the single-model layers to lists of length 1, so
# every layer is a list.
layers = [
[
layer,
]
if isinstance(layer, str)
else layer
for layer in layers
]
self.layer_num = len(layers)
assert self.layer_num > 1, "ONIOM with only 1 layer requested. Aborting!"
############
# #
# MODELS #
# #
############
# Create mapping between model and its parent layer. Actually
# this is a bit hacky right now, as the mapping should not be between
# model and parent layer, but between model and parent model.
# This way we expect the parent layer to have the same calculator
# throughout, so multicenter ONIOM with different calculators
# in all but the smallest layer (highest level) is not well defined.
#
# Whether multicenter ONIOM in an intermediate layer is useful may
# be another question to be answered ;).
self.model_parent_layers = dict()
for i, layer in enumerate(layers[1:]):
self.model_parent_layers.update({model: i for model in layer})
model_keys = list(it.chain(*layers))
cur_calc_num = 0
def get_calc(calc_key, base_name=None):
"""Helper function for easier generation of calculators
with incrementing calc_number."""
nonlocal cur_calc_num
kwargs = calcs[calc_key].copy()
type_ = kwargs.pop("type")
kwargs["calc_number"] = cur_calc_num
if base_name is not None:
kwargs["base_name"] = base_name
calc = CALC_DICT[type_](**kwargs)
cur_calc_num += 1
return calc
# Create models and required calculators
self.models = list()
self.layers = [list() for _ in layers]
for model in model_keys[1:]:
parent_layer_ind = self.model_parent_layers[model]
parent_layer = layers[parent_layer_ind]
parent_calc_keys = set([models[model]["calc"] for model in parent_layer])
assert len(parent_calc_keys) == 1, (
"It seems you are trying to run a multicenter ONIOM setup in "
"an intermediate layer with different calculators. This is "
"not supported right now."
)
parent = parent_layer[0]
model_calc_key = models[model]["calc"]
parent_calc_key = models[parent]["calc"]
model_base_name = f"{model}_{model_calc_key}"
model_calc = get_calc(model_calc_key, base_name=model_base_name)
parent_base_name = f"{model}_parent"
parent_calc = get_calc(parent_calc_key, base_name=parent_base_name)
model = Model(
name=model,
calc_level=model_calc_key,
calc=model_calc,
parent_name=parent,
parent_calc_level=parent_calc_key,
parent_calc=parent_calc,
atom_inds=models[model]["inds"],
parent_atom_inds=models[parent]["inds"],
use_link_atoms=self.use_link_atoms,
)
self.models.append(model)
self.layers[parent_layer_ind + 1].append(model)
# The 'real' model, covering all atoms
real_calc = get_calc(real_key)
real_model = Model(
name=real_key,
calc_level=real_key,
calc=real_calc,
parent_name=None,
parent_calc_level=None,
parent_calc=None,
atom_inds=list(range(len(geom.atoms))),
parent_atom_inds=None,
)
self.models.insert(0, real_model)
self.layers[0].append(real_model)
# Reverse order of models so the first model is the real system
# self.models = self.models[::-1]
self.log("Created all ONIOM layers:")
for model in self.models:
self.log("\t" + str(model))
# Create link atoms
[model.create_links(geom.atoms, geom.cart_coords) for model in self.models]
# Create functions to calculate bond vectors with link atom hosts
[model.create_bond_vec_getters(geom.atoms) for model in self.models]
# And do a quick sanity check
assert (
len(self.models[0].links) == 0
), "There must not be any links in the 'real' layer!"
# Look for link atoms that appear in two adjacent layers. In such situations
# the higher layer is coupled to a layer two levels below. This may be a bad
# idea.
for i, (lower_model, model) in enumerate(
zip(self.models[:-1], self.models[1:])
):
lower_links = lower_model.links
links = model.links
same_links = [link for link in links if link in lower_links]
if same_links:
print(f"Found {len(same_links)} link(s) that appear(s) in two layers!")
for j, link in enumerate(same_links):
print(f"\t{j:02d}: {link}")
print(
f"Your current setup couples layer '{model.name}' to "
f"layer '{self.models[i-1].name}' two levels below! "
"This is probably a bad idea!"
)
self.log(
f"Created ONIOM calculator with {self.layer_num} layers and "
f"{len(self.models)} models."
)
def run_calculations(self, atoms, coords, method):
self.log(f"{self.embeddings[self.embedding]} ONIOM calculation")
all_results = list()
for i, layer in enumerate(self.layers):
point_charges = None
# Calculate embedding charges, if required
if self.embedding and (i > 0):
parent_layer = self.layers[i - 1]
coords3d = coords.reshape(-1, 3)
point_charges = get_embedding_charges(
self.embedding, layer, parent_layer, coords3d
)
self.log(
f"Polarizing calculation in layer {i} ({layer}) by "
f"charges from layer {i-1} ({self.layers[i-1]})."
)
ee_charge_sum = point_charges[:, -1].sum()
self.log(f"sum(charges)={ee_charge_sum:.4f}")
# Enable for debugging
# if len(layer) == 1:
# model = layer[0]
# tmp_atoms, tmp_coords = model.capped_atoms_coords(atoms, coords)
# render_geom_and_charges(
# Geometry(tmp_atoms, tmp_coords), point_charges
# )
results = [
getattr(model, method)(atoms, coords, point_charges=point_charges)
for model in layer
]
all_results.extend(results)
self.calc_counter += 1
return all_results
def run_calculation(self, atoms, coords):
self.log("run_calculation() called. Doing simple energy calculation!")
return self.get_energy(atoms, coords)
def get_energy(self, atoms, coords):
all_energies = self.run_calculations(atoms, coords, "get_energy")
energy = sum(all_energies)
return {
"energy": energy,
}
def get_forces(self, atoms, coords):
all_results = self.run_calculations(atoms, coords, "get_forces")
energies, forces_ = zip(*all_results)
forces_ = [np.array(f).reshape(-1, 3) for f in forces_]
energy = sum(energies)
forces = forces_[0]
for mdl, f in zip(self.models[1:], forces_[1:]):
forces[mdl.parent_atom_inds] += f
return {
"energy": energy,
"forces": forces.flatten(),
}
def get_hessian(self, atoms, coords):
all_results = self.run_calculations(atoms, coords, "get_hessian")
energies, hessians = zip(*all_results)
energy = sum(energies)
hessian = hessians[0]
for mdl, h in zip(self.models[1:], hessians[1:]):
inds = atom_inds_to_cart_inds(mdl.parent_atom_inds)
# Keep in mind that we modify hessians[0] in place
hessian[inds[:, None], inds[None, :]] += h
return {
"energy": energy,
"hessian": hessian,
}
def atom_inds_in_layer(self, index, exclude_inner=False):
"""Returns list of atom indices in layer at index.
Atoms that also appear in inner layers can be excluded on request.
Parameters
----------
index : int
Index of the layer to select.
exclude_inner : bool, default=False, optional
Whether to exclude atom indices that also appear in inner layers.
Returns
-------
atom_indices : list
List containing the atom indices in the selected layer.
"""
layer = self.layers[index]
atom_inds = list(it.chain(*[model.atom_inds for model in layer]))
if exclude_inner and (index < len(self.layers) - 1):
lower_inds = self.atom_inds_in_layer(index + 1)
# Drop indices that appear in inner layers
atom_inds = [i for i in atom_inds if i not in lower_inds]
return atom_inds
def calc_layer(self, atoms, coords, index, parent_correction=True):
layer = self.layers[index]
assert len(layer) == 1, "Multicenter not yet supported!"
(model,) = layer
result = model.get_forces(atoms, coords, parent_correction=parent_correction)
return result
|
eljost/pysisyphus
|
pysisyphus/calculators/ONIOMv2.py
|
Python
|
gpl-3.0
| 34,889
|
[
"Jmol",
"ORCA",
"OpenMolcas",
"Psi4",
"PySCF",
"TURBOMOLE",
"xTB"
] |
d25f3bf7a0bd0344e70f222f554cb9272e229c72ee1cbd9e10e9ba062f8967ea
|
#!/usr/bin/env python
'''
Optimize the geometry of excited states using CASSCF or CASCI
Note that when optimizing excited states, states may flip, which can cause
convergence issues in the geometry optimizer.
'''
from pyscf import gto
from pyscf import scf, mcscf
mol = gto.Mole()
mol.atom="N; N 1, 1.1"
mol.basis= "6-31g"
mol.build()
mf = scf.RHF(mol).run()
#
# 1. Geometry optimization over a specific state.
#
# Targeting at one excited state
mc = mcscf.CASCI(mf, 4,4)
mc.state_specific_(2)
excited_grad = mc.nuc_grad_method().as_scanner()
mol1 = excited_grad.optimizer().kernel()
# Code above is equivalent to
mc = mcscf.CASCI(mf, 4,4)
mc.fcisolver.nstates = 3
excited_grad = mc.nuc_grad_method().as_scanner(state=2)
mol1 = excited_grad.optimizer().kernel()
# CASSCF for one excited state
mc = mcscf.CASSCF(mf, 4,4)
mc.state_specific_(2)
excited_grad = mc.nuc_grad_method().as_scanner()
mol1 = excited_grad.optimizer().kernel()
#
# 2. Geometry optimization over an averaged state.
# Note the state-averaged gradients are optimized.
#
mc = mcscf.CASCI(mf, 4,4)
mc.state_average_([0.25, 0.25, 0.25, 0.25])
excited_grad = mc.nuc_grad_method().as_scanner()
mol1 = excited_grad.optimizer().kernel()
mc = mcscf.CASSCF(mf, 4,4)
mc.state_average_([0.25, 0.25, 0.25, 0.25])
excited_grad = mc.nuc_grad_method().as_scanner()
mol1 = excited_grad.optimizer().kernel()
#
# 3. Geometry optimization for mixed FCI solvers.
# Note the state-averaged gradients are optimized.
#
import copy
mc = mcscf.CASSCF(mf, 4,4)
solver1 = mc.fcisolver
solver2 = copy.copy(mc.fcisolver)
solver2.spin = 2
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2], (.5, .5))
excited_grad = mc.nuc_grad_method().as_scanner()
mol1 = excited_grad.optimizer().kernel()
#
# 4. Geometry optimization of the 3rd of 4 states
#
mc = mcscf.CASSCF(mf, 4,4)
mc.state_average_([0.25, 0.25, 0.25, 0.25])
excited_grad = mc.nuc_grad_method().as_scanner(state=2)
mol1 = excited_grad.optimizer().kernel()
#
# 5. Geometry optimization of the triplet state
# In a triplet-singlet state average
#
mc = mcscf.CASSCF(mf, 4,4)
solver1 = mc.fcisolver
solver2 = copy.copy(mc.fcisolver)
solver2.spin = 2
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2], (.5, .5))
excited_grad = mc.nuc_grad_method().as_scanner(state=1)
mol1 = excited_grad.optimizer().kernel()
|
sunqm/pyscf
|
examples/geomopt/12-mcscf_excited_states.py
|
Python
|
apache-2.0
| 2,328
|
[
"PySCF"
] |
7733152497246db6d39a97297c5b44dc0b0e730a16ba75c7616d158901588eb0
|
#!/usr/bin/env python
'''
This program takes a list of module files and creates a (possibly disjoint)
directed graph of the modules and their dependencies. Arrows on the
directed graph point to the dependent module.
Typical usage would be as follows:
VisualizeModuleDependencies.py VTKSourceDir vtkFiltersSources,vtkInteractionStyle,vtkRenderingOpenGL
'''
import os, sys
from collections import defaultdict
import vtk
def GetProgramParameters():
import argparse
description = 'Creates a directed graph of the modules and their dependencies.'
epilogue = '''
This program takes a list of module files and creates a
(possibly disjoint) directed graph of the modules and their
dependencies. Arrows on the directed graph point to the dependent module.
By default, dependencies of a given module are followed to their maximum
depth. However you can restrict the depth by specifying the depth to
which dependent modules are searched.
The moduleList is a comma-separated list of module names with no
spaces between the names.
The treeDepth defaults to 0, this means that for a given module all
dependent modules will be found. If non-zero, then trees will be only
searched to that depth.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue)
parser.add_argument('vtkSourceDir', help='The path to the vtk Source Directory.')
parser.add_argument('moduleList', help='The list of modules.')
parser.add_argument('moduleTreeDepth', help='The depth of the module trees', nargs='?', default=0, type=int)
args = parser.parse_args()
vtkSourceDir = args.vtkSourceDir
moduleList = [x.strip() for x in args.moduleList.split(',')]
moduleTreeDepth = args.moduleTreeDepth
return (vtkSourceDir, moduleList, moduleTreeDepth)
def GetProgramParametersOld():
'''
Used for Python versions < 2.7
'''
if len(sys.argv) < 3:
s = 'Usage: ' + sys.argv[0] + ' vtkSourceDir moduleList [moduleTreeDepth]'
print(s)
exit(0)
args = dict()
args['vtkSourceDir'] = sys.argv[1]
args['moduleList'] = sys.argv[2]
args['moduleTreeDepth'] = 0
if len(sys.argv) > 3:
args['moduleTreeDepth'] = int(sys.argv[3])
vtkSourceDir = args['vtkSourceDir']
moduleList = [x.strip() for x in args['moduleList'].split(',')]
moduleTreeDepth = args['moduleTreeDepth']
return (vtkSourceDir, moduleList, moduleTreeDepth)
def FindModuleFiles(path):
'''
Get a list of module files in the VTK directory.
'''
moduleFiles = [os.path.join(root, name)
for root, dirs, files in os.walk(path)
for name in files
if name == ("module.cmake")]
return moduleFiles
def ParseModuleFile(fileName):
'''
Read each module file returning the module name and what
it depends on or implements.
'''
fh = open(fileName, 'rb')
lines = []
for line in fh:
line = line.strip()
if line.startswith('$'): # Skip CMake variable names
continue
if line.startswith('#'):
continue
line = line.split('#')[0].strip() # inline comments
if line == "":
continue
line = line.split(')')[0].strip() # closing brace with no space
if line == "":
continue
for l in line.split(" "):
lines.append(l)
languages = ['PYTHON', 'TCL', 'JAVA']
keywords = ['BACKEND', 'COMPILE_DEPENDS', 'DEPENDS', 'EXCLUDE_FROM_ALL',
'EXCLUDE_FROM_WRAPPING', 'GROUPS', 'IMPLEMENTS', 'KIT',
'PRIVATE_DEPENDS', 'TEST_DEPENDS', 'IMPLEMENTATION_REQUIRED_BY_BACKEND'] + \
map(lambda l: 'EXCLUDE_FROM_%s_WRAPPING' % l, languages)
moduleName = ""
depends = []
implements = []
state = "START";
for item in lines:
if state == "START" and item.startswith("vtk_module("):
moduleName = item.split("(")[1]
continue
if item in keywords:
state = item
continue
if state == 'DEPENDS' and item != ')':
depends.append(item)
continue
if state == 'IMPLEMENTS' and item != ')':
implements.append(item)
continue
return [moduleName, depends + implements]
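# Illustrative input (module and interface names here are hypothetical):
#   vtk_module(vtkFiltersSources
#     DEPENDS
#       vtkCommonCore
#     IMPLEMENTS
#       vtkSomeInterface)
# would return ['vtkFiltersSources', ['vtkCommonCore', 'vtkSomeInterface']].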
def FindAllNeededModules(modules, foundModules, moduleDepencencies):
'''
Recursively search moduleDependencies finding all modules.
'''
if modules:
for m in modules:
foundModules.add(m)
foundModules = foundModules | set(moduleDepencencies[m]) # Set union
foundModules = FindAllNeededModules(moduleDepencencies[m],
foundModules, moduleDepencencies)
return foundModules
def MakeModuleTree(module, index, tree, moduleDependencies, treeDepth, level=0):
'''
For a given module make a tree with the module as the root and the
dependent modules as children.
'''
if module:
index = index + [module]
if treeDepth == 0 or level < treeDepth:
for m in moduleDependencies[module]:
level += 1
MakeModuleTree(m, index, tree, moduleDependencies, treeDepth, level)
level -= 1
Add(tree, index)
# One-line Tree in Python
# See: https://gist.github.com/hrldcpr/2012250
def Tree(): return defaultdict(Tree)
def Add(tree, keys):
for key in keys:
tree = tree[key]
def PrettyPrint(tree, level=0):
'''
Useful to visualize the tree.
'''
result = ''
for k, v in tree.iteritems():
s = ' ' * level + k + '\n'
result += s
level += 1
result += PrettyPrint(v, level)
level -= 1
return result
def GetAllKeys(tree):
'''
Return all the modules in the tree as a set.
'''
modules = set()
for key in tree:
modules = set(list(modules) + [key] + list(GetAllKeys(tree[key])))
return modules
def MakeEdgeList(t):
'''
Return a set that represents the edges in the tree.
'''
edgeList = set()
for k, v in t.iteritems():
subKeys = v.keys()
if subKeys:
for kk in subKeys:
edgeList.add((k, kk))
edg = MakeEdgeList(v)
if edg:
edgeList.update(edg)
return edgeList
def MakeGraph(t, parent='', level=0):
'''
Returns a list that has two elements, the vertices and the edge list.
'''
return [GetAllKeys(t), MakeEdgeList(t)]
def GenerateGraph(moduleList, moduleDepencencies, moduleTreeDepth):
'''
Generate a graph from the module list.
The resultant graph is a list consisting of two sets, the first set
is the set of vertices and the second set is the edge list.
'''
graph = [set(), set()]
for m in moduleList:
t = Tree()
MakeModuleTree(m, [], t, moduleDepencencies, moduleTreeDepth)
g = MakeGraph(t)
graph[0].update(g[0])
if g[1]:
graph[1].update(g[1])
return graph
def GenerateVTKGraph(graph):
'''
Take the vertices and edge list in the graph parameter
and return a VTK graph.
'''
g = vtk.vtkMutableDirectedGraph()
# Label the vertices
labels = vtk.vtkStringArray()
labels.SetNumberOfComponents(1)
labels.SetName("Labels")
index = dict()
l = list(graph[0])
# Make the vertex labels and create a dictionary with the
# keys as labels and the vertex ids as the values.
for i in range(0, len(l)):
# Set the vertex labels
labels.InsertNextValue(l[i])
index[l[i]] = g.AddVertex()
g.GetVertexData().AddArray(labels)
# Add edges
l = list(graph[1])
for i in range(0, len(l)):
ll = list(l[i])
g.AddGraphEdge(index[ll[0]], index[ll[1]])
# g.Dump()
return g
def DisplayGraph(graph):
'''
Display the graph.
'''
theme = vtk.vtkViewTheme()
theme.SetBackgroundColor(0, 0, .1)
theme.SetBackgroundColor2(0, 0, .5)
# Layout the graph
# Pick a strategy you like.
# strategy = vtk.vtkCircularLayoutStrategy()
strategy = vtk.vtkSimple2DLayoutStrategy()
# strategy = vtk.vtkRandomLayoutStrategy()
layout = vtk.vtkGraphLayout()
layout.SetLayoutStrategy(strategy)
layout.SetInputData(graph)
view = vtk.vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(layout.GetOutputPort())
# Tell the view to use the vertex layout we provide.
view.SetLayoutStrategyToPassThrough()
view.SetEdgeLabelVisibility(True)
view.SetVertexLabelArrayName("Labels")
view.SetVertexLabelVisibility(True)
view.ApplyViewTheme(theme)
# Manually create an actor containing the glyphed arrows.
# Get the edge geometry
edgeGeom = vtk.vtkGraphToPolyData()
edgeGeom.SetInputConnection(layout.GetOutputPort())
edgeGeom.EdgeGlyphOutputOn()
# Set the position (0: edge start, 1: edge end) where
# the edge arrows should go.
# edgeGeom.SetEdgeGlyphPosition(0.8)
edgeGeom.SetEdgeGlyphPosition(0.85)
# Make a simple edge arrow for glyphing.
# arrowSource = vtk.vtkGlyphSource2D()
# arrowSource.SetGlyphTypeToEdgeArrow()
# arrowSource.SetScale(0.075)
# Or use a cone.
coneSource = vtk.vtkConeSource()
coneSource.SetRadius(0.025)
coneSource.SetHeight(0.1)
coneSource.SetResolution(12)
# Use Glyph3D to repeat the glyph on all edges.
arrowGlyph = vtk.vtkGlyph3D()
arrowGlyph.SetInputConnection(0, edgeGeom.GetOutputPort(1))
# arrowGlyph.SetInputConnection(1, arrowSource.GetOutputPort())
arrowGlyph.SetInputConnection(1, coneSource.GetOutputPort())
# Add the edge arrow actor to the view.
arrowMapper = vtk.vtkPolyDataMapper()
arrowMapper.SetInputConnection(arrowGlyph.GetOutputPort())
arrowActor = vtk.vtkActor()
arrowActor.SetMapper(arrowMapper)
view.GetRenderer().AddActor(arrowActor)
view.ResetCamera()
view.Render()
view.SetInteractionModeTo3D()
view.GetInteractor().Initialize()
view.GetInteractor().Start()
def main():
ver = list(sys.version_info[0:2])
ver = ver[0] + ver[1] / 10.0
if ver >= 2.7:
vtkSourceDir, moduleList, moduleTreeDepth = GetProgramParameters()
else:
vtkSourceDir, moduleList, moduleTreeDepth = GetProgramParametersOld()
# Parse the module files making a dictionary of each module and its
# dependencies or what it implements.
moduleDepencencies = dict()
moduleFiles = FindModuleFiles(vtkSourceDir + "/")
for fname in moduleFiles:
m = ParseModuleFile(fname)
moduleDepencencies[m[0]] = m[1]
# Generate a graph from the module list.
graph = GenerateGraph(moduleList, moduleDepencencies, moduleTreeDepth)
# Now build a vtk graph.
g = GenerateVTKGraph(graph)
# Display it.
DisplayGraph(g)
if __name__ == '__main__':
main()
|
sumedhasingla/VTK
|
Utilities/Maintenance/VisualizeModuleDependencies.py
|
Python
|
bsd-3-clause
| 11,041
|
[
"VTK"
] |
1b1a2acf42871b654041c0e4b628594b7cbc94ed8e82f8f7f376a8ea88dc6504
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import os
import tempfile
class Ncl(Package):
"""NCL is an interpreted language designed specifically for
scientific data analysis and visualization. Supports NetCDF 3/4,
GRIB 1/2, HDF 4/5, HDF-EOS 2/5, shapefile, ASCII, binary.
Numerous analysis functions are built-in."""
homepage = "https://www.ncl.ucar.edu"
url = "https://github.com/NCAR/ncl/archive/6.4.0.tar.gz"
version('6.6.2', sha256='cad4ee47fbb744269146e64298f9efa206bc03e7b86671e9729d8986bb4bc30e')
version('6.5.0', sha256='133446f3302eddf237db56bf349e1ebf228240a7320699acc339a3d7ee414591')
version('6.4.0', sha256='0962ae1a1d716b182b3b27069b4afe66bf436c64c312ddfcf5f34d4ec60153c8')
patch('spack_ncl.patch')
# Make ncl compile with hdf5 1.10 (upstream as of 6.5.0)
patch('hdf5.patch', when="@6.4.0")
# ymake-filter's buffer may overflow (upstream as of 6.5.0)
patch('ymake-filter.patch', when="@6.4.0")
# ymake filters additional local library and include paths improperly
patch('ymake.patch', when="@6.4.0:")
# ncl does not build with gcc@10:
# https://github.com/NCAR/ncl/issues/123
patch('https://src.fedoraproject.org/rpms/ncl/raw/12778c55142b5b1ccc26dfbd7857da37332940c2/f/ncl-boz.patch', when='%gcc@10:', sha256='64f3502c9deab48615a4cbc26073173081c0774faf75778b044d251e45d238f7')
# This installation script is implemented according to this manual:
# http://www.ncl.ucar.edu/Download/build_from_src.shtml
variant('hdf4', default=False, description='Enable HDF4 support.')
variant('gdal', default=False, description='Enable GDAL support.')
variant('triangle', default=True, description='Enable Triangle support.')
variant('udunits2', default=True, description='Enable UDUNITS-2 support.')
variant('openmp', default=True, description='Enable OpenMP support.')
# Non-optional dependencies according to the manual:
depends_on('jpeg')
depends_on('netcdf-c')
depends_on('cairo+X+ft+pdf')
# Extra dependencies that may be missing from build system:
depends_on('bison', type='build')
depends_on('flex+lex')
depends_on('iconv')
depends_on('tcsh')
# Also, the manual says that ncl requires zlib, but that comes as a
# mandatory dependency of libpng, which is a mandatory dependency of cairo.
# The following dependencies are required, otherwise several components
# fail to compile:
depends_on('curl')
depends_on('iconv')
depends_on('libx11')
depends_on('libxaw')
depends_on('libxmu')
depends_on('pixman')
depends_on('bzip2')
depends_on('freetype')
depends_on('fontconfig')
# In Spack, we do not have an option to compile netcdf-c without netcdf-4
# support, so we will tell the ncl configuration script that we want
# support for netcdf-4, but the script assumes that hdf5 is compiled with
# szip support. We introduce this restriction with the following dependency
# statement.
depends_on('hdf5+szip')
depends_on('szip')
# ESMF is only required at runtime (for ESMF_regridding.ncl)
depends_on('esmf', type='run')
# In Spack, we also do not have an option to compile netcdf-c without DAP
# support, so we will tell the ncl configuration script that we have it.
# Some of the optional dependencies according to the manual:
depends_on('hdf', when='+hdf4')
depends_on('gdal+proj@:2.4', when='+gdal')
depends_on('udunits', when='+udunits2')
# We need src files of triangle to appear in ncl's src tree if we want
# triangle's features.
resource(
name='triangle',
url='http://www.netlib.org/voronoi/triangle.zip',
sha256='1766327add038495fa3499e9b7cc642179229750f7201b94f8e1b7bee76f8480',
placement='triangle_src',
when='+triangle')
sanity_check_is_file = ['bin/ncl']
def patch(self):
# Make configure scripts use Spack's tcsh
files = ['Configure'] + glob.glob('config/*')
filter_file('^#!/bin/csh -f', '#!/usr/bin/env csh', *files)
@run_before('install')
def filter_sbang(self):
# Filter sbang before install so Spack's sbang hook can fix it up
files = glob.glob('ncarg2d/src/bin/scripts/*')
files += glob.glob('ncarview/src/bin/scripts/*')
files += glob.glob('ni/src/scripts/*')
csh = join_path(self.spec['tcsh'].prefix.bin, 'csh')
filter_file('^#!/bin/csh', '#!{0}'.format(csh), *files)
def install(self, spec, prefix):
if (self.compiler.fc is None) or (self.compiler.cc is None):
raise InstallError('NCL package requires both '
'C and Fortran compilers.')
self.prepare_site_config()
self.prepare_install_config()
self.prepare_src_tree()
make('Everything', parallel=False)
def setup_run_environment(self, env):
env.set('NCARG_ROOT', self.spec.prefix)
def prepare_site_config(self):
fc_flags = []
cc_flags = []
c2f_flags = []
if '+openmp' in self.spec:
fc_flags.append(self.compiler.openmp_flag)
cc_flags.append(self.compiler.openmp_flag)
if self.compiler.name == 'gcc':
fc_flags.append('-fno-range-check')
c2f_flags.extend(['-lgfortran', '-lm'])
elif self.compiler.name == 'intel':
fc_flags.append('-fp-model precise')
cc_flags.append('-fp-model precise'
' -std=c99'
' -D_POSIX_C_SOURCE=2 -D_GNU_SOURCE')
c2f_flags.extend(['-lifcore', '-lifport'])
if self.spec.satisfies('%gcc@10:'):
fc_flags.append('-fallow-argument-mismatch')
cc_flags.append('-fcommon')
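# GCC 10 made Fortran argument-type mismatches hard errors and switched
# C to -fno-common by default; these two flags restore the older,
# permissive behavior that NCL's legacy sources rely on.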
with open('./config/Spack', 'w') as f:
f.writelines([
'#define HdfDefines\n',
'#define CppCommand \'/usr/bin/env cpp -traditional\'\n',
'#define CCompiler cc\n',
'#define FCompiler fc\n',
('#define CtoFLibraries ' + ' '.join(c2f_flags) + '\n'
if len(c2f_flags) > 0
else ''),
('#define CtoFLibrariesUser ' + ' '.join(c2f_flags) + '\n'
if len(c2f_flags) > 0
else ''),
('#define CcOptions ' + ' '.join(cc_flags) + '\n'
if len(cc_flags) > 0
else ''),
('#define FcOptions ' + ' '.join(fc_flags) + '\n'
if len(fc_flags) > 0
else ''),
'#define BuildShared NO'
])
def prepare_install_config(self):
# Remove the results of the previous configuration attempts.
self.delete_files('./Makefile', './config/Site.local')
# Generate an array of answers that will be passed to the interactive
# configuration script.
config_answers = [
# Enter Return to continue
'\n',
# Build NCL?
'y\n',
# Parent installation directory :
self.spec.prefix + '\n',
# System temp space directory :
tempfile.gettempdir() + '\n',
# Build NetCDF4 feature support (optional)?
'y\n'
]
if '+hdf4' in self.spec:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'y\n',
# Also build HDF4 support (optional) into raster library?
'y\n',
# Did you build HDF4 with szip support?
'y\n' if self.spec.satisfies('^hdf+szip') else 'n\n'
])
else:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'n\n',
# Also build HDF4 support (optional) into raster library?
'n\n'
])
config_answers.extend([
# Build Triangle support (optional) into NCL
'y\n' if '+triangle' in self.spec else 'n\n',
# If you are using NetCDF V4.x, did you enable NetCDF-4 support?
'y\n',
# Did you build NetCDF with OPeNDAP support?
'y\n',
# Build GDAL support (optional) into NCL?
'y\n' if '+gdal' in self.spec else 'n\n',
# Build EEMD support (optional) into NCL?
'n\n',
# Build Udunits-2 support (optional) into NCL?
'y\n' if '+udunits2' in self.spec else 'n\n',
# Build Vis5d+ support (optional) into NCL?
'n\n',
# Build HDF-EOS2 support (optional) into NCL?
'n\n',
# Build HDF5 support (optional) into NCL?
'y\n',
# Build HDF-EOS5 support (optional) into NCL?
'n\n',
# Build GRIB2 support (optional) into NCL?
'n\n',
# Enter local library search path(s) :
self.spec['fontconfig'].prefix.lib + ' ' +
self.spec['pixman'].prefix.lib + ' ' +
self.spec['bzip2'].prefix.lib + '\n',
# Enter local include search path(s) :
# All other paths will be passed by the Spack wrapper.
join_path(self.spec['freetype'].prefix.include, 'freetype2') +
'\n',
# Go back and make more changes or review?
'n\n',
# Save current configuration?
'y\n'
])
config_answers_filename = 'spack-config.in'
config_script = Executable('./Configure')
with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
config_script(input=f)
def prepare_src_tree(self):
if '+triangle' in self.spec:
triangle_src = join_path(self.stage.source_path, 'triangle_src')
triangle_dst = join_path(self.stage.source_path, 'ni', 'src',
'lib', 'hlu')
copy(join_path(triangle_src, 'triangle.h'), triangle_dst)
copy(join_path(triangle_src, 'triangle.c'), triangle_dst)
@staticmethod
def delete_files(*filenames):
for filename in filenames:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError as e:
raise InstallError('Failed to delete file %s: %s' % (
e.filename, e.strerror))
|
rspavel/spack
|
var/spack/repos/builtin/packages/ncl/package.py
|
Python
|
lgpl-2.1
| 10,773
|
[
"NetCDF"
] |
f0237e6ffbd4ce06a5be2c9460e6aa664b88134589f6bc1ae1ac50bdde7febf4
|
#!/usr/bin/python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2004, 2005, 2006, 2007 Red Hat, Inc.
#
# Author: Paul Nasrat, Florian La Roche, Phil Knirsch, Thomas Woerner,
# Florian Festi
#
#
# Read .rpm packages from python. Implemented completely in python without
# using the librpm C library. Use "oldpyrpm.py -h" to get a list of possible
# options and http://people.redhat.com/laroche/pyrpm/ also has some docu.
# This python script depends on libxml2 and urlgrabber for some functionality.
#
# Tested with all rpm packages from RHL5.2, 6.x, 7.x, 8.0, 9,
# Fedora Core 1/2/3/4/5/6/development, Fedora Extras, livna, freshrpms,
# Mandriva 10.2, Open SuSE 10RC1 and other distributions.
# No output should be generated if rpm packages are verified via this python
# implementation, so all possible quirks in the binary packages are taken
# care of and can be read in this code.
#
# Known problem areas of /bin/rpm and this pyrpm script:
# - Signing a second time can corrupt packages with older /bin/rpm releases.
# - Packages built with a broken kernel that does not mmap() files with
# size 0 just have that filemd5sum set to "" and "rpm -V" also fails.
# - Verify mode warns about a few packages from RHL5.x (rpm-2.x).
# - CentOS 3.7/ia64 and 4.3/alpha rpm packages cannot be read, might be
# endian problems showing up?
#
#
# TODO:
# git repos:
# - Optionally import the full tree for the initial import (e.g. FC releases).
# - Optionally also sort by time for e.g. FC updates dirs.
# general:
# - Separate out the parts which are distro-specific and support more
# distro variants.
# - Should we get rid of doRead()?
# - How to do safe shell escapes for os.system()
# - Better error handling in PyGZIP.
# - streaming read for cpio files
# - use setPerms() in doLnOrCopy()
# - Change "strict" and "verify" into "debug/verbose" and have one integer
# specify debug and output levels. (Maybe also "nodigest" can move in?)
# - Whats the difference between "cookie" and "buildhost" + "buildtime".
# - Locking done for read/write rpmdb access. Check /bin/rpm and yum.
# - Should we delete the __db* cache files? What about db4 config settings?
# rpm header:
# - write tag 61 if no region tag exists
# - --checkrpmdb --enablerepos: allow changed signatures in packages
# - check against current upstream rpm development
# - Can doVerify() be called on rpmdb data or if the sig header is
# missing?
# - allow a --rebuilddb into a new directory and a diff between two rpmdb
# - rpmdb cleanup to remove duplicate rpm entries (yum-utils has this already)
# - check OpenGPG signatures
# - allow src.rpm selection based on OpenGPG signature. Prefer GPG signed.
# - add a new writeHeader2() that copies the existing rpm header and then
# writes new entries to the end. This is more robust than the existing
# writeHeader().
# - For reading rpmdb we could first try to detect the region tag, then
# read all additional added tags into a separate hash. This would really
# clean up data handling and how duplicate tags are taken care off.
# This should now be done on the write side, the reading part should
# probably stay as it is.
# - Bring extractCpio and verifyCpio closer together again.
# - Cpio extract should only set hardlinks for files who have already shown
# up in the cpio earlier (before the data).
# - i386 rpm extraction on ia64? (This is stored like relocated rpms in
# duplicated file tags.)
# yum.conf/repos:
# - For repos stop switching to another server once repomd.xml is read in.
# Currently also all rpms are associated/hardcoded with the first server.
# Rpms could be cached regardless of the mirror they come from. By sha1sum?
# - cacheLocal:
# - If we get data from several mirrors, how should we combine data without
# refetching data too often or overriding data too soon? If we know a
# checksum for all data, we could store data by checksum filename?
# - check if local files really exist with os.stat()?,
# maybe only once per repo? (for repo sorting)
# - sort several urls to list local ones first, add mirror speed check
# - Read the complete file into memory, no local files for: repomd.xml
# and mirrorlist.
# - Can we cache the mirrorlist for some time?
# things to be noted, probably not getting fixed:
# - "badsha1_2" has the size added in reversed order to compute the final
# sha1 sum. A patch to python shamodule.c could allow to verify also these
# old broken entries.
# things that look even less important to implement:
# - add streaming support to bzip2 compressed payload
# - lua scripting support
# possible changes for /bin/rpm:
# - Do not generate filecontexts tags if they are empty, maybe not at all.
# - "rhnplatform" could go away if it is not required.
#
__version__ = "0.90"
__doc__ = """Manage everything around Linux RPM packages."""
# Look at "pylint --list-msgs" to find these out:
# pylint: disable-msg=C0103,C0111
# pylint: disable-msg=R0902,R0903,R0911,R0912,R0913,R0914,R0915
# pylint: disable-msg=W0142,W0201,W0511,W0704
import sys
if sys.version_info < (2, 2):
sys.exit("error: Python 2.2 or later required")
import os, os.path, zlib, gzip, errno, re, time, signal
from types import IntType, ListType
from struct import pack, unpack
if sys.version_info < (3, 0):
import md5
import sha as sha1
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from hashlib import md5, sha1
from io import StringIO
uselibxml = 0
try:
# python-2.5 layout:
from xml.etree.cElementTree import iterparse
except ImportError:
try:
# often older python versions add this to site-packages:
from cElementTree import iterparse
except ImportError:
try:
# maybe the python-only version is available?
from ElementTree import iterparse
except ImportError:
# ok, we give up and use libxml then:
uselibxml = 1
#if uselibxml:
try:
import libxml2
TYPE_ELEMENT = libxml2.XML_READER_TYPE_ELEMENT
TYPE_END_ELEMENT = libxml2.XML_READER_TYPE_END_ELEMENT
except ImportError:
print "libxml2 is not imported, do not try to use repodata."
# python-only
if sys.version_info < (2, 3):
from types import StringType
basestring = StringType # pylint: disable-msg=W0622
TMP_MAX = 10000
from random import Random
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long.
_RandomNameSequence is an iterator."""
characters = ("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"0123456789-_")
def __init__(self):
self.rng = Random()
self.normcase = os.path.normcase
def __iter__(self):
return self
def next(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for _ in "123456"]
return self.normcase("".join(letters))
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence # pylint: disable-msg=W0603
if _name_sequence == None:
_name_sequence = _RandomNameSequence()
return _name_sequence
else:
from tempfile import _get_candidate_names, TMP_MAX
# python-only-end
# pyrex-code
#from tempfile import _get_candidate_names, TMP_MAX
#cdef extern from "string.h":
# int strlen(char *)
#cdef extern from "netinet/in.h":
# unsigned int ntohl(unsigned int netlong)
#cdef extern from "Python.h":
# object PyString_FromStringAndSize(char *s, int len)
# pyrex-code-end
# optimized routines instead of:
#from stat import S_ISREG, S_ISLNK, S_ISDIR, S_ISFIFO, S_ISCHR, \
# S_ISBLK, S_ISSOCK
# python-only
def S_ISREG(mode):
return (mode & 0170000) == 0100000
def S_ISLNK(mode):
return (mode & 0170000) == 0120000
def S_ISDIR(mode):
return (mode & 0170000) == 0040000
def S_ISFIFO(mode):
return (mode & 0170000) == 0010000
def S_ISCHR(mode):
return (mode & 0170000) == 0020000
def S_ISBLK(mode):
return (mode & 0170000) == 0060000
def S_ISSOCK(mode):
return (mode & 0170000) == 0140000
# python-only-end
# pyrex-code
#cdef S_ISREG(int mode):
# return (mode & 0170000) == 0100000
#cdef S_ISLNK(int mode):
# return (mode & 0170000) == 0120000
#cdef S_ISDIR(int mode):
# return (mode & 0170000) == 0040000
#cdef S_ISFIFO(int mode):
# return (mode & 0170000) == 0010000
#cdef S_ISCHR(int mode):
# return (mode & 0170000) == 0020000
#cdef S_ISBLK(int mode):
# return (mode & 0170000) == 0060000
#cdef S_ISSOCK(int mode):
# return (mode & 0170000) == 0140000
# pyrex-code-end
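# A quick sketch of how these mode bits combine: the 0170000 mask selects
# the file type while the lower 07777 bits hold the permissions. For
# example a mode of 0100644 is a regular file (0100644 & 0170000 ==
# 0100000) with rw-r--r-- permissions (0100644 & 07777 == 0644), so
# S_ISREG(0100644) is true.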
# Use this filename prefix for all temp files to be able
# to search them and delete them again if they are left
# over from killed processes.
tmpprefix = "..pyrpm"
tmpdir = os.environ.get("TMPDIR", "/tmp")
openflags = os.O_RDWR | os.O_CREAT | os.O_EXCL
if hasattr(os, "O_NOINHERIT"):
openflags |= os.O_NOINHERIT # pylint: disable-msg=E1101
if hasattr(os, "O_NOFOLLOW"):
openflags |= os.O_NOFOLLOW # pylint: disable-msg=E1101
def mkstemp_file(dirname, pre=tmpprefix, special=0):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
if special:
fd = open(filename, "wb")
else:
fd = os.open(filename, openflags, 0600)
#_set_cloexec(fd)
return (fd, filename)
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
def mkstemp_link(dirname, pre, linkfile):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
os.link(linkfile, filename)
return filename
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
# make sure we have a fallback if hardlinks cannot be done
# on this partition
if e.errno in (errno.EXDEV, errno.EPERM):
return None
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
def mkstemp_dir(dirname, pre=tmpprefix):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
os.mkdir(filename)
return filename
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
def mkstemp_symlink(dirname, pre, symlinkfile):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
os.symlink(symlinkfile, filename)
return filename
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
def mkstemp_mkfifo(dirname, pre):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
os.mkfifo(filename)
return filename
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
def mkstemp_mknod(dirname, pre, mode, rdev):
names = _get_candidate_names()
for _ in xrange(TMP_MAX):
name = names.next()
filename = "%s/%s.%s" % (dirname, pre, name)
try:
os.mknod(filename, mode, rdev)
return filename
except OSError, e:
if e.errno == errno.EEXIST:
continue # try again
raise
raise IOError, (errno.EEXIST, "No usable temporary file name found")
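# All mkstemp_* helpers above share the same pattern: generate a random
# temporary name, retry on EEXIST and leave the atomic os.rename() into
# the final place to the caller. An illustrative sketch (made-up paths):
#   (fd, tmp) = mkstemp_file("/var/tmp")
#   os.write(fd, "data")
#   os.close(fd)
#   os.rename(tmp, "/var/tmp/final")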
def doLnOrCopy(src, dst):
"""Hardlink or copy a file "src" to a new file "dst"."""
dstdir = pathdirname(dst)
tmp = mkstemp_link(dstdir, tmpprefix, src)
if tmp == None:
# no hardlink possible, copy the data into a new file
(fd, tmp) = mkstemp_file(dstdir)
fsrc = open(src, "rb")
while 1:
buf = fsrc.read(16384)
if not buf:
break
os.write(fd, buf)
fsrc.close()
os.close(fd)
st = os.stat(src)
os.utime(tmp, (st.st_atime, st.st_mtime))
os.chmod(tmp, st.st_mode & 0170000)
if os.geteuid() == 0:
os.lchown(tmp, st.st_uid, st.st_gid)
os.rename(tmp, dst)
def doRead(fd, size):
data = fd.read(size)
if len(data) != size:
raise IOError, "failed to read data (%d instead of %d)" \
% (len(data), size)
return data
def getChecksum(fd, digest="md5"):
if isinstance(fd, basestring):
try:
fd = open(fd, "rb")
except IOError:
return None
if digest == "md5":
ctx = md5.new()
else:
ctx = sha1.new()
while 1:
data = fd.read(16384)
if not data:
break
ctx.update(data)
return ctx.hexdigest()
def getMD5(fpath):
return getChecksum(fpath, "md5")
supported_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGHUP]
#supported_signals.extend([signal.SIGSEGV, signal.SIGBUS, signal.SIGABRT,
# signal.SIGILL, signal.SIGFPE])
def setSignals(handler, supported_signals2):
signals = {}
for key in supported_signals2:
signals[key] = signal.signal(key, handler)
return signals
def blockSignals():
return setSignals(signal.SIG_IGN, supported_signals)
def resetSignals(signals):
for (key, value) in signals.iteritems():
signal.signal(key, value)
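# Typical usage sketch for the signal helpers: ignore signals around a
# critical section and restore the previous handlers afterwards:
#   sigs = blockSignals()
#   try:
#       pass # critical work that must not be interrupted
#   finally:
#       resetSignals(sigs)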
def _doprint(msg):
sys.stdout.write(msg)
sys.stdout.flush()
class PrintHash:
"""'numobjects' indicates how often we will call.nectObject()
and 'hashlength' gives the number of '#' hash chars we want to
output. """
def __init__(self, numobjects=100, hashlength=30):
# Make sure we don't get a division by zero.
if not numobjects:
numobjects = 1
self.numobjects = numobjects
self.hashlength = hashlength
self.num = 0
# Immediately output something to indicate a first start:
self.hashpos = 1
_doprint("#")
def nextObject(self, finish=None):
if finish:
# Output the rest of the hashes now:
npos = self.hashlength
else:
self.num += 1
npos = (self.num * self.hashlength) // self.numobjects
# In case we call .nextObject() too often:
if npos > self.hashlength:
npos = self.hashlength
msg = ""
if self.hashpos < npos:
msg = "#" * (npos - self.hashpos)
self.hashpos = npos
if finish:
msg += "\n"
if msg:
_doprint(msg)
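# PrintHash usage sketch: spread 30 "#" chars over 200 processed objects
# ("work" is a hypothetical per-object routine):
#   p = PrintHash(200, 30)
#   for obj in objects:
#       work(obj)
#       p.nextObject()
#   p.nextObject(finish=1) # prints the remaining hashes plus a newline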
# Optimized routines that use zlib to extract data, since
# "import gzip" doesn't give good data handling (old code
# can still easily be enabled to compare performance):
class PyGZIP:
def __init__(self, filename, fd, datasize, readsize):
self.filename = filename
if fd == None:
fd = open(filename, "rb")
self.fd = fd
self.length = 0 # length of all decompressed data
self.length2 = datasize
self.readsize = readsize
if self.readsize != None:
self.readsize -= 10
self.enddata = "" # remember last 8 bytes for crc/length check
self.pos = 0
self.data = ""
data = doRead(self.fd, 10)
if data[:3] != "\037\213\010":
raise ValueError, "Not a gzipped file: %s" % self.filename
# flag (1 byte), modification time (4 bytes), extra flags (1), OS (1)
flag = ord(data[3])
if flag & 4: # extra field
xlen = ord(self.fd.read(1))
xlen += 256 * ord(self.fd.read(1))
doRead(self.fd, xlen)
if self.readsize != None:
self.readsize -= 2 + xlen
if flag & 8: # filename
while self.fd.read(1) != "\000":
if self.readsize != None:
self.readsize -= 1
if self.readsize != None:
self.readsize -= 1
if flag & 16: # comment string
while self.fd.read(1) != "\000":
if self.readsize != None:
self.readsize -= 1
if self.readsize != None:
self.readsize -= 1
if flag & 2:
doRead(self.fd, 2) # 16-bit header CRC
if self.readsize != None:
self.readsize -= 2
self.decompobj = zlib.decompressobj(-zlib.MAX_WBITS)
self.crcval = zlib.crc32("")
def read(self, bytes):
decompdata = []
obj = self.decompobj
while bytes:
if self.data:
if len(self.data) - self.pos <= bytes:
decompdata.append(self.data[self.pos:])
bytes -= len(self.data) - self.pos
self.data = ""
continue
end = self.pos + bytes
decompdata.append(self.data[self.pos:end])
self.pos = end
break
readsize = 32768
if self.readsize != None and self.readsize < 32768:
readsize = self.readsize
data = self.fd.read(readsize)
if not data:
break
if self.readsize != None:
self.readsize -= len(data)
if len(data) >= 8:
self.enddata = data[-8:]
else:
self.enddata = self.enddata[len(data) - 8:] + data
x = obj.decompress(data)
self.crcval = zlib.crc32(x, self.crcval)
self.length += len(x)
if len(x) <= bytes:
bytes -= len(x)
decompdata.append(x)
else:
decompdata.append(x[:bytes])
self.data = x
self.pos = bytes
break
return "".join(decompdata)
def printErr(self, err):
print "%s: %s" % (self.filename, err)
def __del__(self):
# Sanity check.
if self.data:
self.printErr("PyGZIP: bytes left to read: %d" % \
(len(self.data) - self.pos))
if self.readsize != None:
            # zlib sometimes leaves one or two trailing bytes in the
            # stream that it did not need to decompress all data.
data = doRead(self.fd, 8 + self.readsize)
self.enddata = data[-8:]
else:
data = self.fd.read()
if len(data) >= 8:
self.enddata = data[-8:]
else:
self.enddata = self.enddata[len(data) - 8:] + data
(crc32, isize) = unpack("<iI", self.enddata)
if crc32 != self.crcval:
print self.filename, "CRC check failed:", crc32, self.crcval
if isize != self.length:
print self.filename, "Incorrect length of data produced:", \
isize, self.length
        if self.length2 != None and isize != self.length2:
            print self.filename, "Incorrect length of data produced:", \
                isize, self.length2
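# PyGZIP usage sketch: stream-decompress a gzip file in chunks; datasize
# and readsize may be passed as None if the sizes are not known upfront:
#   gz = PyGZIP("/tmp/payload.gz", None, None, None)
#   while 1:
#       chunk = gz.read(16384)
#       if not chunk:
#           break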
class GzipFile(gzip.GzipFile):
def _write_gzip_header(self):
self.fileobj.write("\037\213\010") # magic header + compression method
fname = self.filename[:-3]
flags = "\000"
if fname:
flags = "\010"
self.fileobj.write(flags + "\000\000\000\000\002\377")
if fname:
self.fileobj.write(fname + "\000")
cachedir = "/var/cache/pyrpm/"
opensuse = 0
def setProxyOptions(o):
import urlparse
proxies = {}
proxy_string = o.get("proxy", None)
if proxy_string not in (None, "", "_none_"):
proxy_username = o.get("proxy_username", None)
if proxy_username != None:
password = o.get("proxy_password", "")
if password:
password = ":" + password
parsed = urlparse.urlsplit(proxy_string, allow_fragments=0)
proxy_string = "%s://%s%s@%s%s" % (parsed[0], proxy_username,
password, parsed[1], parsed[2] + "?" + parsed[3])
proxies["http"] = proxy_string
proxies["https"] = proxy_string
proxies["ftp"] = proxy_string
o["proxies"] = proxies
def setOptions(yumconf={}, repo=None): # pylint: disable-msg=W0102
# Default values:
o = {
"timeout": "20.0",
"keepalive": "0",
"retries": "3",
"http_caching": "all",
"proxy": None,
"proxy_username": None,
"proxy_password": None,
# Set the proxy settings from above into urlgrabber data:
"proxies": {},
"http_headers": None
}
# Override with "main" settings:
for (key, value) in yumconf.get("main", {}).iteritems():
o[key] = value
# Override with repo-specific settings:
for (key, value) in yumconf.get(repo, {}).iteritems():
o[key] = value
# Set proxy items:
setProxyOptions(o)
# Set http headers:
headers = []
# If we should not cache and we don't already contain
# a Pragma header, then add it...
nocache = ("Pragma", "no-cache")
if o["http_caching"] != "all" and nocache not in headers:
headers.append(nocache)
o["http_headers"] = headers
return o
urloptions = setOptions()
# rpm tag types
#RPM_NULL = 0
RPM_CHAR = 1
RPM_INT8 = 2 # currently unused
RPM_INT16 = 3
RPM_INT32 = 4
RPM_INT64 = 5 # currently unused
RPM_STRING = 6
RPM_BIN = 7
RPM_STRING_ARRAY = 8
RPM_I18NSTRING = 9
# new types internal to this tool:
# RPM_STRING_ARRAY for app + params, otherwise a single RPM_STRING
RPM_ARGSTRING = 12
RPM_GROUP = 13
# pyrex-code
#cdef int cRPM_CHAR, cRPM_INT8, cRPM_INT16, cRPM_INT32, cRPM_INT64
#cdef int cRPM_STRING, cRPM_BIN, cRPM_STRING_ARRAY, cRPM_I18NSTRING
#cdef int cRPM_ARGSTRING, cRPM_GROUP
#cRPM_CHAR = 1
#cRPM_INT8 = 2
#cRPM_INT16 = 3
#cRPM_INT32 = 4
#cRPM_INT64 = 5
#cRPM_STRING = 6
#cRPM_BIN = 7
#cRPM_STRING_ARRAY = 8
#cRPM_I18NSTRING = 9
#cRPM_ARGSTRING = 12
#cRPM_GROUP = 13
# pyrex-code-end
# RPMSENSEFLAGS
RPMSENSE_ANY = 0
RPMSENSE_SERIAL = (1 << 0) # legacy
RPMSENSE_LESS = (1 << 1)
RPMSENSE_GREATER = (1 << 2)
RPMSENSE_EQUAL = (1 << 3)
RPMSENSE_PROVIDES = (1 << 4) # only used internally by builds
RPMSENSE_CONFLICTS = (1 << 5) # only used internally by builds
RPMSENSE_PREREQ = (1 << 6) # legacy
RPMSENSE_OBSOLETES = (1 << 7) # only used internally by builds
RPMSENSE_INTERP = (1 << 8) # Interpreter used by scriptlet.
RPMSENSE_SCRIPT_PRE = ((1 << 9) | RPMSENSE_PREREQ) # %pre dependency
RPMSENSE_SCRIPT_POST = ((1 << 10)|RPMSENSE_PREREQ) # %post dependency
RPMSENSE_SCRIPT_PREUN = ((1 << 11)|RPMSENSE_PREREQ) # %preun dependency
RPMSENSE_SCRIPT_POSTUN = ((1 << 12)|RPMSENSE_PREREQ) # %postun dependency
RPMSENSE_SCRIPT_VERIFY = (1 << 13) # %verify dependency
RPMSENSE_FIND_REQUIRES = (1 << 14) # find-requires generated dependency
RPMSENSE_FIND_PROVIDES = (1 << 15) # find-provides generated dependency
RPMSENSE_TRIGGERIN = (1 << 16) # %triggerin dependency
RPMSENSE_TRIGGERUN = (1 << 17) # %triggerun dependency
RPMSENSE_TRIGGERPOSTUN = (1 << 18) # %triggerpostun dependency
RPMSENSE_MISSINGOK = (1 << 19) # suggests/enhances/recommends hint
RPMSENSE_SCRIPT_PREP = (1 << 20) # %prep build dependency
RPMSENSE_SCRIPT_BUILD = (1 << 21) # %build build dependency
RPMSENSE_SCRIPT_INSTALL = (1 << 22) # %install build dependency
RPMSENSE_SCRIPT_CLEAN = (1 << 23) # %clean build dependency
RPMSENSE_RPMLIB = ((1 << 24) | RPMSENSE_PREREQ) # rpmlib(feature) dependency
RPMSENSE_TRIGGERPREIN = (1 << 25) # @todo Implement %triggerprein
RPMSENSE_KEYRING = (1 << 26)
RPMSENSE_PATCHES = (1 << 27)
RPMSENSE_CONFIG = (1 << 28)
RPMSENSE_SENSEMASK = 15 # Mask to get senses: serial, less, greater, equal.
RPMSENSE_TRIGGER = (RPMSENSE_TRIGGERIN | RPMSENSE_TRIGGERUN
| RPMSENSE_TRIGGERPOSTUN)
_ALL_REQUIRES_MASK = (RPMSENSE_INTERP | RPMSENSE_SCRIPT_PRE
| RPMSENSE_SCRIPT_POST | RPMSENSE_SCRIPT_PREUN | RPMSENSE_SCRIPT_POSTUN
| RPMSENSE_SCRIPT_VERIFY | RPMSENSE_FIND_REQUIRES | RPMSENSE_SCRIPT_PREP
| RPMSENSE_SCRIPT_BUILD | RPMSENSE_SCRIPT_INSTALL | RPMSENSE_SCRIPT_CLEAN
| RPMSENSE_RPMLIB | RPMSENSE_KEYRING)
def _notpre(x):
return (x & ~RPMSENSE_PREREQ)
_INSTALL_ONLY_MASK = _notpre(RPMSENSE_SCRIPT_PRE | RPMSENSE_SCRIPT_POST
| RPMSENSE_RPMLIB | RPMSENSE_KEYRING)
_ERASE_ONLY_MASK = _notpre(RPMSENSE_SCRIPT_PREUN | RPMSENSE_SCRIPT_POSTUN)
def isLegacyPreReq(x):
return (x & _ALL_REQUIRES_MASK) == RPMSENSE_PREREQ
def isInstallPreReq(x):
return (x & _INSTALL_ONLY_MASK) != 0
def isErasePreReq(x):
return (x & _ERASE_ONLY_MASK) != 0
# RPM file attributes
RPMFILE_NONE = 0
RPMFILE_CONFIG = (1 << 0) # from %%config
RPMFILE_DOC = (1 << 1) # from %%doc
RPMFILE_ICON = (1 << 2) # from %%donotuse.
RPMFILE_MISSINGOK = (1 << 3) # from %%config(missingok)
RPMFILE_NOREPLACE = (1 << 4) # from %%config(noreplace)
RPMFILE_SPECFILE = (1 << 5) # .spec file in source rpm
RPMFILE_GHOST = (1 << 6) # from %%ghost
RPMFILE_LICENSE = (1 << 7) # from %%license
RPMFILE_README = (1 << 8) # from %%readme
RPMFILE_EXCLUDE = (1 << 9) # from %%exclude, internal
RPMFILE_UNPATCHED = (1 << 10) # placeholder (SuSE)
RPMFILE_PUBKEY = (1 << 11) # from %%pubkey
RPMFILE_POLICY = (1 << 12) # from %%policy
# List of all rpm tags we care about. We mark older tags which no longer
# appear in newer rpm packages (Fedora Core development tree) as
# "legacy".
# tagname: [tag, type, how-many, flags:legacy=1,
# src-only=2,bin-only=4,signed-int=8]
rpmtag = {
# basic info
"name": [1000, RPM_STRING, None, 0],
"epoch": [1003, RPM_INT32, 1, 0],
"version": [1001, RPM_STRING, None, 0],
"release": [1002, RPM_STRING, None, 0],
"arch": [1022, RPM_STRING, None, 0],
# dependencies: provides, requires, obsoletes, conflicts
"providename": [1047, RPM_STRING_ARRAY, None, 0],
"provideflags": [1112, RPM_INT32, None, 0],
"provideversion": [1113, RPM_STRING_ARRAY, None, 0],
"requirename": [1049, RPM_STRING_ARRAY, None, 0],
"requireflags": [1048, RPM_INT32, None, 0],
"requireversion": [1050, RPM_STRING_ARRAY, None, 0],
"obsoletename": [1090, RPM_STRING_ARRAY, None, 4],
"obsoleteflags": [1114, RPM_INT32, None, 4],
"obsoleteversion": [1115, RPM_STRING_ARRAY, None, 4],
"conflictname": [1054, RPM_STRING_ARRAY, None, 0],
"conflictflags": [1053, RPM_INT32, None, 0],
"conflictversion": [1055, RPM_STRING_ARRAY, None, 0],
# triggers:
"triggername": [1066, RPM_STRING_ARRAY, None, 4],
"triggerflags": [1068, RPM_INT32, None, 4],
"triggerversion": [1067, RPM_STRING_ARRAY, None, 4],
"triggerscripts": [1065, RPM_STRING_ARRAY, None, 4],
"triggerscriptprog": [1092, RPM_STRING_ARRAY, None, 4],
"triggerindex": [1069, RPM_INT32, None, 4],
# scripts
"prein": [1023, RPM_STRING, None, 4],
"preinprog": [1085, RPM_ARGSTRING, None, 4],
"postin": [1024, RPM_STRING, None, 4],
"postinprog": [1086, RPM_ARGSTRING, None, 4],
"preun": [1025, RPM_STRING, None, 4],
"preunprog": [1087, RPM_ARGSTRING, None, 4],
"postun": [1026, RPM_STRING, None, 4],
"postunprog": [1088, RPM_ARGSTRING, None, 4],
"verifyscript": [1079, RPM_STRING, None, 4],
"verifyscriptprog": [1091, RPM_ARGSTRING, None, 4],
"pretrans": [1151, RPM_STRING, None, 4],
"posttrans": [1152, RPM_STRING, None, 4],
"pretransprog": [1153, RPM_ARGSTRING, None, 4],
"posttransprog": [1154, RPM_ARGSTRING, None, 4],
# addon information:
"rpmversion": [1064, RPM_STRING, None, 0],
"payloadformat": [1124, RPM_STRING, None, 0], # "cpio"
"payloadcompressor": [1125, RPM_STRING, None, 0],# "gzip" or "bzip2"
"i18ntable": [100, RPM_STRING_ARRAY, None, 0], # list of available langs
"summary": [1004, RPM_I18NSTRING, None, 0],
"description": [1005, RPM_I18NSTRING, None, 0],
"url": [1020, RPM_STRING, None, 0],
"license": [1014, RPM_STRING, None, 0],
"sourcerpm": [1044, RPM_STRING, None, 4], # name of src.rpm for binary rpms
"changelogtime": [1080, RPM_INT32, None, 8],
"changelogname": [1081, RPM_STRING_ARRAY, None, 0],
"changelogtext": [1082, RPM_STRING_ARRAY, None, 0],
"prefixes": [1098, RPM_STRING_ARRAY, None, 4], # relocatable rpm packages
"optflags": [1122, RPM_STRING, None, 4], # optimization flags for gcc
"pubkeys": [266, RPM_STRING_ARRAY, None, 4],
"sourcepkgid": [1146, RPM_BIN, 16, 4], # md5 from srpm (header+payload)
"immutable": [63, RPM_BIN, 16, 0],
# less important information:
"buildtime": [1006, RPM_INT32, 1, 8], # time of rpm build
"buildhost": [1007, RPM_STRING, None, 0], # hostname where rpm was built
"cookie": [1094, RPM_STRING, None, 0], # build host and time
"group": [1016, RPM_GROUP, None, 0], # comps.xml/groupfile is used now
"size": [1009, RPM_INT32, 1, 0], # sum of all file sizes
"distribution": [1010, RPM_STRING, None, 0],
"vendor": [1011, RPM_STRING, None, 0],
"packager": [1015, RPM_STRING, None, 0],
"os": [1021, RPM_STRING, None, 0], # always "linux"
"payloadflags": [1126, RPM_STRING, None, 0], # "9"
"rhnplatform": [1131, RPM_STRING, None, 4], # == arch
"platform": [1132, RPM_STRING, None, 0],
# rpm source packages:
"source": [1018, RPM_STRING_ARRAY, None, 2],
"patch": [1019, RPM_STRING_ARRAY, None, 2],
"buildarchs": [1089, RPM_STRING_ARRAY, None, 2],
"excludearch": [1059, RPM_STRING_ARRAY, None, 2],
"exclusivearch": [1061, RPM_STRING_ARRAY, None, 2],
"exclusiveos": [1062, RPM_STRING_ARRAY, None, 2], # ["Linux"] or ["linux"]
# information about files
"dirindexes": [1116, RPM_INT32, None, 0],
"dirnames": [1118, RPM_STRING_ARRAY, None, 0],
"basenames": [1117, RPM_STRING_ARRAY, None, 0],
"fileusername": [1039, RPM_STRING_ARRAY, None, 0],
"filegroupname": [1040, RPM_STRING_ARRAY, None, 0],
"filemodes": [1030, RPM_INT16, None, 0],
"filemtimes": [1034, RPM_INT32, None, 8],
"filedevices": [1095, RPM_INT32, None, 0],
"fileinodes": [1096, RPM_INT32, None, 0],
"filesizes": [1028, RPM_INT32, None, 0],
"filemd5s": [1035, RPM_STRING_ARRAY, None, 0],
"filerdevs": [1033, RPM_INT16, None, 0],
"filelinktos": [1036, RPM_STRING_ARRAY, None, 0],
"fileflags": [1037, RPM_INT32, None, 0],
# less common used data:
"fileverifyflags": [1045, RPM_INT32, None, 0],
"filelangs": [1097, RPM_STRING_ARRAY, None, 0],
"filecolors": [1140, RPM_INT32, None, 0],
"fileclass": [1141, RPM_INT32, None, 0],
"filedependsx": [1143, RPM_INT32, None, 0],
"filedependsn": [1144, RPM_INT32, None, 0],
"classdict": [1142, RPM_STRING_ARRAY, None, 0],
"dependsdict": [1145, RPM_INT32, None, 0],
# data from files marked with "%policy" in specfiles
"policies": [1150, RPM_STRING_ARRAY, None, 0],
"filecontexts": [1147, RPM_STRING_ARRAY, None, 0], # selinux filecontexts
# tags not in Fedora Core development trees anymore:
"capability": [1105, RPM_INT32, None, 1],
"xpm": [1013, RPM_BIN, None, 1],
"gif": [1012, RPM_BIN, None, 1],
# bogus RHL5.2 data in XFree86-libs, ash, pdksh
"verifyscript2": [15, RPM_STRING, None, 1],
"nosource": [1051, RPM_INT32, None, 1],
"nopatch": [1052, RPM_INT32, None, 1],
"disturl": [1123, RPM_STRING, None, 1],
"oldfilenames": [1027, RPM_STRING_ARRAY, None, 1],
"triggerin": [1100, RPM_STRING, None, 5],
"triggerun": [1101, RPM_STRING, None, 5],
"triggerpostun": [1102, RPM_STRING, None, 5],
"archivesize": [1046, RPM_INT32, 1, 1],
# tags used in openSuSE:
"suggestsname": [1156, RPM_STRING_ARRAY, None, 5],
"suggestsversion": [1157, RPM_STRING_ARRAY, None, 5],
"suggestsflags": [1158, RPM_INT32, None, 5],
"enhancesname": [1159, RPM_STRING_ARRAY, None, 5],
"enhancesversion": [1160, RPM_STRING_ARRAY, None, 5],
"enhancesflags": [1161, RPM_INT32, None, 5],
}
# Add a reverse mapping for all tags plus the name again.
for _v in rpmtag.keys():
rpmtag[_v].append(_v)
for _v in rpmtag.values():
rpmtag[_v[0]] = _v
if len(_v) != 5:
raise ValueError, "rpmtag has wrong entries"
del _v
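# After the reverse mapping the same entry is reachable by tag name and
# by tag number, e.g.:
#   rpmtag["name"] == rpmtag[1000] == [1000, RPM_STRING, None, 0, "name"]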
# Additional tags which can be in the rpmdb /var/lib/rpm/Packages.
# Some of these have the data copied over from the signature
# header which is not stored in rpmdb.
rpmdbtag = {
"origdirindexes": [1119, RPM_INT32, None, 1],
"origdirnames": [1121, RPM_STRING_ARRAY, None, 1],
"origbasenames": [1120, RPM_STRING_ARRAY, None, 1],
"install_size_in_sig": [257, RPM_INT32, 1, 0],
"install_md5": [261, RPM_BIN, 16, 0],
"install_gpg": [262, RPM_BIN, None, 0],
"install_dsaheader": [267, RPM_BIN, 16, 0],
"install_sha1header": [269, RPM_STRING, None, 0],
"installtime": [1008, RPM_INT32, 1, 8],
"filestates": [1029, RPM_CHAR, None, 0],
# set for relocatable packages
"instprefixes": [1099, RPM_STRING_ARRAY, None, 0],
# installcolor is set at /bin/rpm compile time based on arch
"installcolor": [1127, RPM_INT32, None, 0],
# unique number per installed rpm package
"installtid": [1128, RPM_INT32, None, 0],
"install_badsha1_1": [264, RPM_STRING, None, 1],
"install_badsha1_2": [265, RPM_STRING, None, 1],
"immutable1": [61, RPM_BIN, 16, 1]
}
# List of the special rpmdb-only tags, as listed above.
install_keys = {}
for _v in rpmdbtag.keys():
install_keys[_v] = 1
rpmdbtag[_v].append(_v)
for _v in rpmdbtag.values():
rpmdbtag[_v[0]] = _v
if len(_v) != 5:
raise ValueError, "rpmdbtag has wrong entries"
for _v in rpmtag.keys():
rpmdbtag[_v] = rpmtag[_v]
del _v
# These entries have the same ID as entries already in the list
# to store duplicate tags that get written to the rpmdb for
# relocated packages or ia64 compat packages (i386 on ia64).
rpmdbtag["dirindexes2"] = [1116, RPM_INT32, None, 0, "dirindexes2"]
rpmdbtag["dirnames2"] = [1118, RPM_STRING_ARRAY, None, 0, "dirnames2"]
rpmdbtag["basenames2"] = [1117, RPM_STRING_ARRAY, None, 0, "basenames2"]
install_keys["dirindexes2"] = 1
install_keys["dirnames2"] = 1
install_keys["basenames2"] = 1
importanttags = {"name":1, "epoch":1, "version":1, "release":1, "arch":1,
"providename":1, "provideflags":1, "provideversion":1,
"requirename":1, "requireflags":1, "requireversion":1,
"obsoletename":1, "obsoleteflags":1, "obsoleteversion":1,
"conflictname":1, "conflictflags":1, "conflictversion":1,
"triggername":1, "triggerflags":1, "triggerversion":1,
"triggerscripts":1, "triggerscriptprog":1, "triggerindex":1,
"prein":1, "preinprog":1, "postin":1, "postinprog":1,
"preun":1, "preunprog":1, "postun":1, "postunprog":1,
"verifyscript":1, "verifyscriptprog":1,
"payloadformat":1, "payloadcompressor":1, "immutable":1,
"oldfilenames":1, "dirindexes":1, "dirnames":1, "basenames":1,
"fileusername":1, "filegroupname":1, "filemodes":1,
"filemtimes":1, "filedevices":1, "fileinodes":1, "filesizes":1,
"filemd5s":1, "filerdevs":1, "filelinktos":1, "fileflags":1,
"filecolors":1, "archivesize":1}
for _v in importanttags.keys():
_value = rpmtag[_v]
importanttags[_v] = _value
importanttags[_value[0]] = _value
versiontag = {"version":1}
for _v in versiontag.keys():
_value = rpmtag[_v]
versiontag[_v] = _value
versiontag[_value[0]] = _value
del _value
del _v
# Info within the sig header.
rpmsigtag = {
    # size of gpg/dsaheader sums differs between 64/65 (contains "\n")
"dsaheader": [267, RPM_BIN, None, 0], # only about header
"gpg": [1005, RPM_BIN, None, 0], # header+payload
"header_signatures": [62, RPM_BIN, 16, 0],
"payloadsize": [1007, RPM_INT32, 1, 0],
"size_in_sig": [1000, RPM_INT32, 1, 0],
"sha1header": [269, RPM_STRING, None, 0],
"md5": [1004, RPM_BIN, 16, 0],
# legacy entries in older rpm packages:
"pgp": [1002, RPM_BIN, None, 1],
"badsha1_1": [264, RPM_STRING, None, 1],
"badsha1_2": [265, RPM_STRING, None, 1] # size added in reversed order
}
# Add a reverse mapping for all tags plus the name again.
for _v in rpmsigtag.keys():
rpmsigtag[_v].append(_v)
for _v in rpmsigtag.values():
rpmsigtag[_v[0]] = _v
if len(_v) != 5:
raise ValueError, "rpmsigtag has wrong entries"
del _v
# How to sync signature and normal header for rpmdb.
# "pgp" should also have a matching entry.
headermatch = (
("dsaheader", "install_dsaheader"),
("md5", "install_md5"),
("gpg", "install_gpg"),
("sha1header", "install_sha1header"),
("size_in_sig", "install_size_in_sig"),
("badsha1_1", "install_badsha1_1"),
("badsha1_2", "install_badsha1_2"),
("payloadsize", "archivesize"),
# need to be generated for the rpmdb:
#"installtime", "filestates", "instprefixes", "installcolor", "installtid"
)
# Names of all possible kernel packages:
kernelpkgs = ["kernel", "kernel-PAE", "kernel-bigmem", "kernel-enterprise",
"kernel-hugemem", "kernel-summit", "kernel-smp", "kernel-largesmp",
"kernel-xen", "kernel-xen0", "kernel-xenU", "kernel-kdump", "kernel-BOOT"]
# Packages which are always installed and not updated:
installonlypkgs = kernelpkgs[:]
installonlypkgs.extend( ["gpg-pubkey",
"kernel-debug", "kernel-devel", "kernel-debug-devel", "kernel-PAE-debug",
"kernel-PAE-debug-devel", "kernel-PAE-devel", "kernel-hugemem-devel",
"kernel-smp-devel", "kernel-largesmp-devel", "kernel-xen-devel",
"kernel-xen0-devel", "kernel-xenU-devel", "kernel-kdump-devel",
"kernel-source", "kernel-unsupported", "kernel-modules"] )
# This is RPMCANONCOLOR in /bin/rpm source, values change over time.
def getInstallColor(arch):
if arch == "ia64": # also "0" and "3" have been here
return 2
elif arch in ("ia32e", "amd64", "x86_64", "sparc64", "s390x",
"powerpc64") or arch.startswith("ppc"):
return 3
return 0
# Buildarchtranslate table for multilib stuff
buildarchtranslate = {
"osfmach3_i686": "i386",
"osfmach3_i586": "i386",
"osfmach3_i486": "i386",
"osfmach3_i386": "i386",
"athlon": "i386",
"pentium4": "i386",
"pentium3": "i386",
"i686": "i386",
"i586": "i386",
"i486": "i386",
"alphaev5": "alpha",
"alphaev56": "alpha",
"alphapca56": "alpha",
"alphaev6": "alpha",
"alphaev67": "alpha",
"sun4c": "sparc",
"sun4d": "sparc",
"sun4m": "sparc",
"sparcv8": "sparc",
"sparcv9": "sparc",
"sun4u": "sparc64",
"osfmach3_ppc": "ppc",
"powerpc": "ppc",
"powerppc": "ppc",
"ppc8260": "ppc",
"ppc8560": "ppc",
"ppc32dy4": "ppc",
"ppciseries": "ppc",
"ppcpseries": "ppc",
"ppc64pseries": "ppc64",
"ppc64iseries": "ppc64",
"atarist": "m68kmint",
"atariste": "m68kmint",
"ataritt": "m68kmint",
"falcon": "m68kmint",
"atariclone": "m68kmint",
"milan": "m68kmint",
"hades": "m68kmint",
"amd64": "x86_64",
"ia32e": "x86_64"
}
# arch => compatible archs, best match first
arch_compats = {
"athlon": ["i686", "i586", "i486", "i386"],
"i686": ["i586", "i486", "i386"],
"i586": ["i486", "i386"],
"i486": ["i386",],
"x86_64": ["amd64", "athlon", "i686", "i586", "i486", "i386"],
"amd64": ["x86_64", "athlon", "i686", "i586", "i486", "i386"],
"ia32e": ["x86_64", "athlon", "i686", "i586", "i486", "i386"],
"ia64": ["i686", "i586", "i486", "i386"],
"alphaev67": ["alphaev6", "alphapca56", "alphaev56", "alphaev5", "alpha",
"axp"],
"alphaev6": ["alphapca56", "alphaev56", "alphaev5", "alpha", "axp"],
"alphapca56": ["alphaev56", "alphaev5", "alpha", "axp"],
"alphaev56": ["alphaev5", "alpha", "axp"],
"alphaev5": ["alpha", "axp"],
"alpha": ["axp",],
"osfmach3_i686": ["i686", "osfmach3_i586", "i586", "osfmach3_i486", "i486",
"osfmach3_i386", "i486", "i386"],
"osfmach3_i586": ["i586", "osfmach3_i486", "i486", "osfmach3_i386", "i486",
"i386"],
"osfmach3_i486": ["i486", "osfmach3_i386", "i486", "i386"],
"osfmach3_i386": ["i486", "i386"],
"osfmach3_ppc": ["ppc", "rs6000"],
"powerpc": ["ppc", "rs6000"],
"powerppc": ["ppc", "rs6000"],
"ppc8260": ["ppc", "rs6000"],
"ppc8560": ["ppc", "rs6000"],
"ppc32dy4": ["ppc", "rs6000"],
"ppciseries": ["ppc", "rs6000"],
"ppcpseries": ["ppc", "rs6000"],
"ppc64": ["ppc", "rs6000"],
"ppc": ["rs6000",],
"ppc64pseries": ["ppc64", "ppc", "rs6000"],
"ppc64iseries": ["ppc64", "ppc", "rs6000"],
"sun4c": ["sparc",],
"sun4d": ["sparc",],
"sun4m": ["sparc",],
"sun4u": ["sparc64", "sparcv9", "sparc"],
"sparc64": ["sparcv9", "sparc"],
"sparcv9": ["sparc",],
"sparcv8": ["sparc",],
"hppa2.0": ["hppa1.2", "hppa1.1", "hppa1.0", "parisc"],
"hppa1.2": ["hppa1.1", "hppa1.0", "parisc"],
"hppa1.1": ["hppa1.0", "parisc"],
"hppa1.0": ["parisc",],
"armv4l": ["armv3l",],
"atarist": ["m68kmint",],
"atariste": ["m68kmint",],
"ataritt": ["m68kmint",],
"falcon": ["m68kmint",],
"atariclone": ["m68kmint",],
"milan": ["m68kmint",],
"hades": ["m68kmint",],
"s390x": ["s390",],
}
def setMachineDistance(arch, archlist=None):
h = {}
h["noarch"] = 0 # noarch is best
h[arch] = 1 # second best is same arch
ind = 2
if archlist == None:
archlist = arch_compats.get(arch, [])
for a in archlist:
h[a] = ind
ind += 1
return h
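# setMachineDistance() sketch: smaller numbers are better matches, e.g.:
#   setMachineDistance("i686")
#   == {"noarch": 0, "i686": 1, "i586": 2, "i486": 3, "i386": 4}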
# check arch names against this list
possible_archs = {
"noarch":1, "i386":1, "i486":1, "i586":1, "i686":1,
"athlon":1, "pentium3":1, "pentium4":1, "x86_64":1, "ia32e":1, "ia64":1,
"alpha":1, "alphaev56":1, "alphaev6":1, "axp":1,
"sparc":1, "sparc64":1, "sparcv9":1,
"s390":1, "s390x":1,
"ppc":1, "ppc64":1, "ppc64iseries":1, "ppc64pseries":1, "ppcpseries":1,
"ppciseries":1, "ppcmac":1, "ppc8260":1, "m68k":1,
"arm":1, "armv3l":1, "armv4b":1, "armv4l":1, "armv4tl":1, "armv5tel":1,
"armv5tejl":1, "armv6l":1,
"mips":1, "mipseb":1, "mipsel":1, "hppa":1, "sh":1,
}
possible_scripts = {
None: 1,
"/bin/sh": 1,
"/sbin/ldconfig": 1,
"/usr/bin/fc-cache": 1,
"/usr/bin/scrollkeeper-update": 1,
"/usr/sbin/build-locale-archive": 1,
"/usr/sbin/glibc_post_upgrade": 1,
"/usr/sbin/glibc_post_upgrade.i386": 1,
"/usr/sbin/glibc_post_upgrade.i686": 1,
"/usr/sbin/glibc_post_upgrade.ppc": 1,
"/usr/sbin/glibc_post_upgrade.ppc64": 1,
"/usr/sbin/glibc_post_upgrade.ia64": 1,
"/usr/sbin/glibc_post_upgrade.s390": 1,
"/usr/sbin/glibc_post_upgrade.s390x": 1,
"/usr/sbin/glibc_post_upgrade.x86_64": 1,
"/usr/sbin/libgcc_post_upgrade": 1,
"/usr/bin/rebuild-gcj-db": 1,
"/usr/libexec/twisted-dropin-cache": 1,
"/usr/bin/texhash": 1,
}
def writeHeader(pkg, tags, taghash, region, skip_tags, useinstall, rpmgroup):
"""Use the data "tags" and change it into a rpmtag header."""
(offset, store, stags1, stags2, stags3) = (0, [], [], [], [])
# Sort by number and also first normal tags, then install_keys tags
# and at the end the region tag.
for tagname in tags.iterkeys():
tagnum = taghash[tagname][0]
if tagname == region:
stags3.append((tagnum, tagname))
elif tagname in skip_tags:
pass
elif useinstall and tagname in install_keys:
stags2.append((tagnum, tagname))
else:
stags1.append((tagnum, tagname))
stags1.sort()
newregion = None
genprovs = None
genindexes = None
if not stags3:
noffset = -(len(stags1) * 16) - 16
tags["immutable1"] = pack("!2IiI", 61, RPM_BIN, noffset, 16)
stags3.append((61, "immutable1"))
newregion = 1
if pkg and pkg["providename"] == None:
genprovs = 1
pkg["providename"] = (pkg["name"],)
pkg["provideflags"] = (RPMSENSE_EQUAL,)
pkg["provideversion"] = (pkg.getEVR(),)
stags2.append((1047, "providename"))
stags2.append((1112, "provideflags"))
stags2.append((1113, "provideversion"))
if pkg and pkg["dirindexes"] == None:
genindexes = 1
(pkg["basenames"], pkg["dirindexes"], pkg["dirnames"]) \
= genBasenames(pkg["oldfilenames"])
stags2.append((1116, "dirindexes"))
stags2.append((1117, "basenames"))
stags2.append((1118, "dirnames"))
stags2.sort()
stags1.extend(stags3)
stags1.extend(stags2)
indexdata = []
for (tagnum, tagname) in stags1:
value = tags[tagname]
ttype = taghash[tagnum][1]
count = len(value)
pad = 0
if ttype == RPM_ARGSTRING:
if isinstance(value, basestring):
ttype = RPM_STRING
else:
ttype = RPM_STRING_ARRAY
elif ttype == RPM_GROUP:
ttype = RPM_I18NSTRING
if rpmgroup:
ttype = rpmgroup
if ttype == RPM_INT32:
if taghash[tagnum][3] & 8:
data = pack("!%di" % count, *value)
else:
data = pack("!%dI" % count, *value)
pad = (4 - (offset % 4)) % 4
elif ttype == RPM_STRING:
count = 1
#PY3: data = value + b"\x00"
data = "%s\x00" % value
elif ttype == RPM_STRING_ARRAY or ttype == RPM_I18NSTRING:
# python-only
data = "".join( [ "%s\x00" % value[i] for i in xrange(count) ] )
#PY3: data = data.encode()
#dummy line for the above one
# python-only-end
# pyrex-code
#k = []
#for i in xrange(count):
# k.append("%s\x00" % value[i])
#data = "".join(k)
# pyrex-code-end
elif ttype == RPM_BIN:
data = value
elif ttype == RPM_INT16:
data = pack("!%dH" % count, *value)
pad = (2 - (offset % 2)) % 2
elif ttype == RPM_CHAR or ttype == RPM_INT8:
data = pack("!%dB" % count, *value)
elif ttype == RPM_INT64:
data = pack("!%dQ" % count, *value)
pad = (8 - (offset % 8)) % 8
if pad:
offset += pad
#PY3: store.append(b"\x00" * pad)
store.append("\x00" * pad)
store.append(data)
index = pack("!4I", tagnum, ttype, offset, count)
offset += len(data)
if tagname == region: # data for region tag is first
indexdata.insert(0, index)
else:
indexdata.append(index)
if newregion:
del tags["immutable1"]
if genprovs:
del pkg["providename"]
del pkg["provideflags"]
del pkg["provideversion"]
if genindexes:
del pkg["basenames"]
del pkg["dirindexes"]
del pkg["dirnames"]
indexNo = len(stags1)
#PY3: store = b"".join(store)
store = "".join(store)
#PY3: indexdata = b"".join(indexdata)
indexdata = "".join(indexdata)
return (indexNo, len(store), indexdata, store)
# locale independent string methods
def _xisalpha(c):
return (c >= "a" and c <= "z") or (c >= "A" and c <= "Z")
def _xisdigit(c):
return c >= "0" and c <= "9"
def _xisalnum(c):
return ((c >= "a" and c <= "z") or (c >= "A" and c <= "Z")
or (c >= "0" and c <= "9"))
# compare two strings, rpm/lib/rpmver.c:rpmvercmp()
def stringCompare(str1, str2):
""" Loop through each version segment (alpha or numeric) of
str1 and str2 and compare them. """
if str1 == str2:
return 0
lenstr1 = len(str1)
lenstr2 = len(str2)
i1 = 0
i2 = 0
while i1 < lenstr1 and i2 < lenstr2:
# remove leading separators
while i1 < lenstr1 and not _xisalnum(str1[i1]):
i1 += 1
while i2 < lenstr2 and not _xisalnum(str2[i2]):
i2 += 1
if i1 == lenstr1 or i2 == lenstr2: # bz 178798
break
# start of the comparison data, search digits or alpha chars
j1 = i1
j2 = i2
if j1 < lenstr1 and _xisdigit(str1[j1]):
while j1 < lenstr1 and _xisdigit(str1[j1]):
j1 += 1
while j2 < lenstr2 and _xisdigit(str2[j2]):
j2 += 1
isnum = 1
else:
while j1 < lenstr1 and _xisalpha(str1[j1]):
j1 += 1
while j2 < lenstr2 and _xisalpha(str2[j2]):
j2 += 1
isnum = 0
# check if we already hit the end
if j1 == i1:
return -1
if j2 == i2:
if isnum:
return 1
return -1
if isnum:
# ignore leading "0" for numbers (1.01 == 1.000001)
while i1 < j1 and str1[i1] == "0":
i1 += 1
while i2 < j2 and str2[i2] == "0":
i2 += 1
# longer size of digits wins
if j1 - i1 > j2 - i2:
return 1
if j2 - i2 > j1 - i1:
return -1
x = cmp(str1[i1:j1], str2[i2:j2])
if x:
return x
# move to next comparison start
i1 = j1
i2 = j2
if i1 == lenstr1:
if i2 == lenstr2:
return 0
return -1
return 1
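# A few stringCompare() examples following the rpmvercmp semantics:
#   stringCompare("1.0", "1.0.1") == -1 # the extra segment wins
#   stringCompare("1.01", "1.1") == 0   # leading zeros are ignored
#   stringCompare("1.0a", "1.0") == 1   # a trailing alpha segment wins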
# EVR compare: uses stringCompare to compare epoch/version/release
def labelCompare(e1, e2):
    # skip comparing the release strings if one of them is empty
r = stringCompare(e1[0], e2[0])
if r == 0:
r = stringCompare(e1[1], e2[1])
if r == 0 and e1[2] != "" and e2[2] != "":
r = stringCompare(e1[2], e2[2])
return r
def pkgCompare(one, two):
return labelCompare((one.getEpoch(), one["version"], one["release"]),
(two.getEpoch(), two["version"], two["release"]))
def rangeCompare(flag1, evr1, flag2, evr2):
"""Check whether (RPMSENSE_* flag, (E, V, R) evr) pairs (flag1, evr1)
and (flag2, evr2) intersect.
Return 1 if they do, 0 otherwise. Assumes at least one of RPMSENSE_EQUAL,
RPMSENSE_LESS or RPMSENSE_GREATER is each of flag1 and flag2."""
sense = labelCompare(evr1, evr2)
if sense < 0:
if (flag1 & RPMSENSE_GREATER) or (flag2 & RPMSENSE_LESS):
return 1
elif sense > 0:
if (flag1 & RPMSENSE_LESS) or (flag2 & RPMSENSE_GREATER):
return 1
else: # elif sense == 0:
if ((flag1 & RPMSENSE_EQUAL) and (flag2 & RPMSENSE_EQUAL)) or \
((flag1 & RPMSENSE_LESS) and (flag2 & RPMSENSE_LESS)) or \
((flag1 & RPMSENSE_GREATER) and (flag2 & RPMSENSE_GREATER)):
return 1
return 0
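# rangeCompare() sketch: does "foo >= 1.2" intersect with "foo < 2.0"?
#   rangeCompare(RPMSENSE_GREATER | RPMSENSE_EQUAL, ("0", "1.2", ""),
#                RPMSENSE_LESS, ("0", "2.0", "")) == 1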
def isCommentOnly(script):
"""Return 1 is script contains only empty lines or lines
starting with "#". """
for line in script.split("\n"):
line2 = line.strip()
if line2 and line2[0] != "#":
return 0
return 1
def makeDirs(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def setPerms(filename, uid, gid, mode, mtime):
if uid != None:
os.lchown(filename, uid, gid)
if mode != None:
os.chmod(filename, mode & 07777)
if mtime != None:
os.utime(filename, (mtime, mtime))
def Uri2Filename(filename):
"""Try changing a file:// url into a local filename, pass
everything else through."""
if filename[:6] == "file:/":
filename = filename[5:]
if filename[1] == "/":
idx = filename.index("/", 2)
filename = filename[idx:]
return filename
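# Uri2Filename() examples:
#   Uri2Filename("file:///var/tmp/a.rpm") == "/var/tmp/a.rpm"
#   Uri2Filename("/var/tmp/a.rpm") == "/var/tmp/a.rpm"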
def isUrl(filename):
for url in ("http://", "ftp://", "file://"):
if filename.startswith(url):
return 1
return 0
def parseFile(filename, requested):
rethash = {}
for l in open(filename, "r").readlines():
tmp = l.split(":")
if tmp[0] in requested:
rethash[tmp[0]] = int(tmp[2])
return rethash
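# parseFile() sketch for ":"-separated files like /etc/passwd; it maps
# each requested name in field 0 to the integer in field 2 (the uid):
#   parseFile("/etc/passwd", {"root": 1}) == {"root": 0}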
class UGid:
"""Store a list of user- and groupnames and transform them in uids/gids."""
def __init__(self, names=None):
self.ugid = {}
if names:
for name in names:
self.ugid.setdefault(name, name)
def transform(self, buildroot):
pass
class Uid(UGid):
def transform(self, buildroot):
# "uid=0" if no /etc/passwd exists at all.
if not os.path.exists(buildroot + "/etc/passwd"):
for uid in self.ugid.iterkeys():
self.ugid[uid] = 0
if uid != "root":
print "Warning: user %s not found, using uid 0." % uid
return
# Parse /etc/passwd if glibc is not yet installed.
if buildroot or not os.path.exists(buildroot + "/sbin/ldconfig"):
uidhash = parseFile(buildroot + "/etc/passwd", self.ugid)
for uid in self.ugid.iterkeys():
if uid in uidhash:
self.ugid[uid] = uidhash[uid]
else:
print "Warning: user %s not found, using uid 0." % uid
self.ugid[uid] = 0
return
# Normal lookup of users via glibc.
for uid in self.ugid.iterkeys():
if uid == "root":
self.ugid[uid] = 0
else:
try:
import pwd
self.ugid[uid] = pwd.getpwnam(uid)[2]
except KeyError:
print "Warning: user %s not found, using uid 0." % uid
self.ugid[uid] = 0
class Gid(UGid):
def transform(self, buildroot):
# "gid=0" if no /etc/group exists at all.
if not os.path.exists(buildroot + "/etc/group"):
for gid in self.ugid.iterkeys():
self.ugid[gid] = 0
if gid != "root":
print "Warning: group %s not found, using gid 0." % gid
return
# Parse /etc/group if glibc is not yet installed.
if buildroot or not os.path.exists(buildroot + "/sbin/ldconfig"):
gidhash = parseFile(buildroot + "/etc/group", self.ugid)
for gid in self.ugid.iterkeys():
if gid in gidhash:
self.ugid[gid] = gidhash[gid]
else:
print "Warning: group %s not found, using gid 0." % gid
self.ugid[gid] = 0
return
# Normal lookup of users via glibc.
for gid in self.ugid.iterkeys():
if gid == "root":
self.ugid[gid] = 0
else:
try:
import grp
self.ugid[gid] = grp.getgrnam(gid)[2]
except KeyError:
print "Warning: group %s not found, using gid 0." % gid
self.ugid[gid] = 0
class CPIO:
"""Read a cpio archive."""
def __init__(self, filename, fd, issrc, size=None):
self.filename = filename
self.fd = fd
self.issrc = issrc
self.size = size
def printErr(self, err):
print "%s: %s" % (self.filename, err)
def __readDataPad(self, size, pad=0):
data = doRead(self.fd, size)
pad = (4 - ((size + pad) % 4)) % 4
doRead(self.fd, pad)
if self.size != None:
self.size -= size + pad
return data
def readCpio(self, func, filenamehash, devinode, filenames, extract, db):
while 1:
# (magic, inode, mode, uid, gid, nlink, mtime, filesize,
# devMajor, devMinor, rdevMajor, rdevMinor, namesize, checksum)
data = doRead(self.fd, 110)
if self.size != None:
self.size -= 110
# CPIO ASCII hex, expanded device numbers (070702 with CRC)
if data[0:6] not in ("070701", "070702"):
self.printErr("bad magic reading CPIO header")
return None
namesize = int(data[94:102], 16)
filename = self.__readDataPad(namesize, 110).rstrip("\x00")
if filename == "TRAILER!!!":
if self.size != None and self.size != 0:
self.printErr("failed cpiosize check")
return None
return 1
if filename[:2] == "./":
filename = filename[1:]
if not self.issrc and filename[:1] != "/":
filename = "%s%s" % ("/", filename)
if filename[-1:] == "/" and filename != "/":
filename = filename[:-1]
if extract:
func(filename, int(data[54:62], 16), self.__readDataPad,
filenamehash, devinode, filenames, db)
else:
# (name, inode, mode, nlink, mtime, filesize, dev, rdev)
filedata = (filename, int(data[6:14], 16),
long(data[14:22], 16), int(data[38:46], 16),
long(data[46:54], 16), int(data[54:62], 16),
int(data[62:70], 16) * 256 + int(data[70:78], 16),
int(data[78:86], 16) * 256 + int(data[86:94], 16))
func(filedata, self.__readDataPad, filenamehash, devinode,
filenames, db)
return None
class HdrIndex:
def __init__(self):
self.hash = {}
self.__len__ = self.hash.__len__
self.__getitem__ = self.hash.get
self.get = self.hash.get
self.__delitem__ = self.hash.__delitem__
self.__setitem__ = self.hash.__setitem__
self.__contains__ = self.hash.__contains__
#PY3: self.has_key = self.hash.__contains__
self.has_key = self.hash.has_key
#self.__repr__ = self.hash.__repr__
def getOne(self, key):
value = self[key]
if value != None:
return value[0]
return value
class ReadRpm: # pylint: disable-msg=R0904
"""Read (Linux) rpm packages."""
def __init__(self, filename, verify=None, fd=None, strict=None,
nodigest=None):
self.filename = filename
self.verify = verify # enable/disable more data checking
self.fd = fd # filedescriptor
self.strict = strict
self.nodigest = nodigest # check md5sum/sha1 digests
self.issrc = 0
self.buildroot = "" # do we have a chroot-like start?
self.owner = None # are uid/gid set?
self.uid = None
self.gid = None
self.relocated = None
self.rpmgroup = None
        # Further data possibly created later on:
#self.leaddata = first 96 bytes of lead data
#self.sigdata = binary blob of signature header
#self.sig = signature header parsed as HdrIndex()
#self.sigdatasize = size of signature header
#self.hdrdata = binary blob of header data
#self.hdr = header parsed as HdrIndex()
#self.hdrdatasize = size of header
def __repr__(self):
return self.getFilename()
def printErr(self, err):
print "%s: %s" % (self.filename, err)
def raiseErr(self, err):
raise ValueError, "%s: %s" % (self.filename, err)
def __openFd(self, offset=None, headerend=None):
if not self.fd:
if isUrl(self.filename):
import urlgrabber
hrange = None
if offset or headerend:
hrange = (offset, headerend)
try:
self.fd = urlgrabber.urlopen(self.filename, range=hrange,
timeout=float(urloptions["timeout"]),
retry=int(urloptions["retries"]),
keepalive=int(urloptions["keepalive"]),
proxies=urloptions["proxies"],
http_headers=urloptions["http_headers"])
except urlgrabber.grabber.URLGrabError: #, e:
self.printErr("could not open file")
#print str(e)
return 1
else:
try:
self.fd = open(self.filename, "rb")
except IOError:
self.printErr("could not open file")
return 1
if offset:
self.fd.seek(offset, 1)
return None
def closeFd(self):
if self.fd != None:
self.fd.close()
self.fd = None
def __relocatedFile(self, filename):
for (old, new) in self.relocated:
if not filename.startswith(old):
continue
if filename == old:
filename = new
elif filename[len(old)] == "/":
filename = new + filename[len(old):]
return filename
def __verifyLead(self, leaddata):
(_, major, minor, rpmtype, arch, name, osnum, sigtype) = \
unpack("!4s2B2H66s2H16x", leaddata)
failed = None
if (major not in (3, 4) or minor != 0 or
rpmtype not in (0, 1) or sigtype != 5 or
osnum not in (1, 21, 255, 256)):
failed = 1
name = name.rstrip("\x00")
if self.strict:
if not os.path.basename(self.filename).startswith(name):
failed = 1
if failed:
print major, minor, rpmtype, arch, name, osnum, sigtype
self.printErr("wrong data in rpm lead")
def __readIndex(self, pad, rpmdb=None):
if rpmdb:
data = self.fd.read(8)
(indexNo, storeSize) = unpack("!2I", data)
#PY3: magic = b"\x8e\xad\xe8\x01\x00\x00\x00\x00"
magic = "\x8e\xad\xe8\x01\x00\x00\x00\x00"
data = magic + data
if indexNo < 1:
self.raiseErr("bad index magic")
else:
data = self.fd.read(16)
(magic, indexNo, storeSize) = unpack("!8s2I", data)
#PY3: if magic != b"\x8e\xad\xe8\x01\x00\x00\x00\x00" or indexNo < 1:
if magic != "\x8e\xad\xe8\x01\x00\x00\x00\x00" or indexNo < 1:
self.raiseErr("bad index magic")
fmt = self.fd.read(16 * indexNo)
fmt2 = self.fd.read(storeSize)
padfmt = ""
padlen = 0
if pad != 1:
padlen = (pad - (storeSize % pad)) % pad
padfmt = self.fd.read(padlen)
if (len(fmt) != 16 * indexNo or len(fmt2) != storeSize or
padlen != len(padfmt)):
self.raiseErr("did not read Index correctly")
return (indexNo, storeSize, data, fmt, fmt2,
16 + len(fmt) + storeSize + padlen)
# pyrex-code
# def __parseIndex(self, indexNo, fmt, fmt2, dorpmtag):
# cdef int i, j, indexNo2, tag, ttype, offset, count, datalen
# cdef char * fmtsp, * fmt2sp
# cdef int * fmtp, * fmt2p
# indexNo2 = indexNo
# fmtsp = fmt
# fmt2sp = fmt2
# fmtp = <int *>fmtsp
# hdr = HdrIndex()
# if not dorpmtag:
# return hdr
# for i from 0 <= i < indexNo2:
# j = i * 4
# tag = ntohl(fmtp[j])
# myrpmtag = dorpmtag.get(tag)
# if not myrpmtag:
# continue
# nametag = myrpmtag[4]
# ttype = ntohl(fmtp[j + 1])
# offset = ntohl(fmtp[j + 2])
# count = ntohl(fmtp[j + 3])
# if ttype == cRPM_STRING:
# data = PyString_FromStringAndSize(fmt2sp + offset,
# strlen(fmt2sp + offset))
# if nametag == "group":
# self.rpmgroup = ttype
# elif ttype == cRPM_INT32:
# # distinguish between signed and unsigned ints
# if myrpmtag[3] & 8:
# #fmt2p = <int *>(fmt2sp + offset)
# #data = []
# #for j from 0 <= j < count:
# # data.append(ntohl(fmt2p[count]))
# data = unpack("!%di" % count,
# fmt2[offset:offset + count * 4])
# else:
# #fmt2p = <int *>(fmt2sp + offset)
# #data = []
# #for j from 0 <= j < count:
# # data.append(ntohl(fmt2p[count]))
# data = unpack("!%dI" % count,
# fmt2[offset:offset + count * 4])
# elif ttype == cRPM_STRING_ARRAY or ttype == cRPM_I18NSTRING:
# data = []
# for j from 0 <= j < count:
# datalen = strlen(fmt2sp + offset)
# data.append(PyString_FromStringAndSize(fmt2sp + offset,
# datalen))
# offset = offset + datalen + 1
# elif ttype == cRPM_BIN:
# data = fmt2[offset:offset + count]
# elif ttype == cRPM_INT16:
# data = unpack("!%dH" % count, fmt2[offset:offset + count * 2])
# elif ttype == cRPM_CHAR or ttype == cRPM_INT8:
# data = unpack("!%dB" % count, fmt2[offset:offset + count])
# elif ttype == cRPM_INT64:
# data = unpack("!%dQ" % count, fmt2[offset:offset + count * 8])
# pyrex-code-end
# python-only
def __parseIndex(self, indexNo, fmt, fmt2, dorpmtag):
hdr = HdrIndex()
if not dorpmtag:
return hdr
for i in xrange(0, indexNo * 16, 16):
(tag, ttype, offset, count) = unpack("!4I", fmt[i:i + 16])
myrpmtag = dorpmtag.get(tag)
if not myrpmtag:
#print "unknown tag:", (tag,ttype,offset,count), self.filename
continue
nametag = myrpmtag[4]
if ttype == RPM_STRING:
#PY3: data = fmt2[offset:fmt2.index(b"\x00", offset)]
data = fmt2[offset:fmt2.index("\x00", offset)]
if nametag == "group":
self.rpmgroup = ttype
elif ttype == RPM_INT32:
# distinguish between signed and unsigned ints
if myrpmtag[3] & 8:
data = unpack("!%di" % count,
fmt2[offset:offset + count * 4])
else:
data = unpack("!%dI" % count,
fmt2[offset:offset + count * 4])
elif ttype == RPM_STRING_ARRAY or ttype == RPM_I18NSTRING:
data = []
for _ in xrange(count):
#PY3: end = fmt2.index(b"\x00", offset)
end = fmt2.index("\x00", offset)
data.append(fmt2[offset:end])
offset = end + 1
elif ttype == RPM_BIN:
data = fmt2[offset:offset + count]
elif ttype == RPM_INT16:
data = unpack("!%dH" % count, fmt2[offset:offset + count * 2])
elif ttype == RPM_CHAR or ttype == RPM_INT8:
data = unpack("!%dB" % count, fmt2[offset:offset + count])
elif ttype == RPM_INT64:
data = unpack("!%dQ" % count, fmt2[offset:offset + count * 8])
# python-only-end
else:
self.raiseErr("unknown tag header")
data = None
# Ignore duplicate entries as long as they are identical.
# They happen for packages signed with several keys or for
# relocated packages in the rpmdb.
if hdr.hash.has_key(nametag):
if nametag == "dirindexes":
nametag = "dirindexes2"
elif nametag == "dirnames":
nametag = "dirnames2"
elif nametag == "basenames":
nametag = "basenames2"
else:
if self.strict or hdr[nametag] != data:
self.printErr("duplicate tag %d" % tag)
continue
hdr.hash[nametag] = data
return hdr
def setHdr(self):
self.__getitem__ = self.hdr.__getitem__
self.__delitem__ = self.hdr.__delitem__
self.__setitem__ = self.hdr.__setitem__
self.__contains__ = self.hdr.__contains__
self.has_key = self.hdr.has_key
#self.__repr__ = self.hdr.__repr__
def readHeader(self, sigtags, hdrtags, keepdata=None, rpmdb=None,
headerend=None):
if rpmdb == None:
if self.__openFd(None, headerend):
return 1
leaddata = self.fd.read(96)
#PY3: if leaddata[:4] != b"\xed\xab\xee\xdb" or len(leaddata) != 96:
if leaddata[:4] != "\xed\xab\xee\xdb" or len(leaddata) != 96:
#from binascii import b2a_hex
self.printErr("no rpm magic found")
#print "wrong lead: %s" % b2a_hex(leaddata[:4])
return 1
#PY3: self.issrc = (leaddata[7] == b"\x01")
self.issrc = (leaddata[7] == "\x01")
if self.verify:
self.__verifyLead(leaddata)
sigdata = self.__readIndex(8)
self.sigdatasize = sigdata[5]
hdrdata = self.__readIndex(1, rpmdb)
self.hdrdatasize = hdrdata[5]
if keepdata:
if rpmdb == None:
self.leaddata = leaddata
self.sigdata = sigdata
self.hdrdata = hdrdata
if not sigtags and not hdrtags:
return None
if self.verify or sigtags:
(sigindexNo, _, _, sigfmt, sigfmt2, _) = sigdata
self.sig = self.__parseIndex(sigindexNo, sigfmt, sigfmt2, sigtags)
(hdrindexNo, _, _, hdrfmt, hdrfmt2, _) = hdrdata
self.hdr = self.__parseIndex(hdrindexNo, hdrfmt, hdrfmt2, hdrtags)
self.setHdr()
if self.verify and self.__doVerify():
return 1
# hack: Save a tiny bit of memory by compressing the fileusername
# and filegroupname strings to be only stored once. Evil and maybe
# this does not make sense at all. At least this belongs into an
# extra function and not into the default path.
#for i in ("fileusername", "filegroupname"):
# if not self[i]:
# continue
# y = []
# z = {}
# for j in self[i]:
# z.setdefault(j, j)
# y.append(z[j])
# self[i] = y
return None
def setOwner(self, owner):
self.owner = owner
if owner:
self.uid = Uid(self["fileusername"])
self.uid.transform(self.buildroot)
self.gid = Gid(self["filegroupname"])
self.gid.transform(self.buildroot)
def verifyCpio(self, filedata, read_data, filenamehash, devinode, _, dummy):
# pylint: disable-msg=W0612,W0613
# Overall result is that apart from the filename information
# we should not depend on any data from the cpio header.
# Data is also stored in rpm tags and the cpio header has
# been broken in enough details to ignore it.
(filename, inode, mode, nlink, mtime, filesize, dev, rdev) = filedata
data = ""
if filesize:
data = read_data(filesize)
fileinfo = filenamehash.get(filename)
if fileinfo == None:
self.printErr("cpio file %s not in rpm header" % filename)
return
(fn, flag, mode2, mtime2, dev2, inode2, user, group, rdev2,
linkto, md5sum, i) = fileinfo
del filenamehash[filename]
# printconf-0.3.61-4.1.i386.rpm is an example where paths are
# stored like: /usr/share/printconf/tests/../mf.magic
        # This makes the normpath() check fail and also gives trouble
# for the algorithm finding hardlinks as the files are also
# included with their normal path. So same dev/inode pairs
# can be hardlinks or they can be wrongly packaged rpms.
if self.strict and filename != os.path.normpath(filename):
self.printErr("failed: normpath(%s)" % filename)
isreg = S_ISREG(mode)
if self.strict:
if isreg and inode != inode2:
self.printErr("wrong fileinode for %s" % filename)
if mode != mode2:
self.printErr("wrong filemode for %s" % filename)
# uid/gid are ignored from cpio
# device/inode are only set correctly for regular files
di = None
if isreg:
di = devinode.get((dev, inode, md5sum))
if di == None:
ishardlink = 0
# nlink is only set correctly for hardlinks, so disable this check:
#if nlink != 1:
# self.printErr("wrong number of hardlinks")
else:
ishardlink = 1
di.remove(i)
if not di:
if not data:
self.printErr("must be 0-size hardlink: %s" % filename)
del devinode[(dev, inode, md5sum)]
else:
if data:
self.printErr("non-zero hardlink file, " + \
"but not the last: %s" % filename)
# Search for "normpath" to read why hardlinks might not
# be hardlinks, but only double stored files with "/../"
# stored in their filename. Broken packages out there...
##XXX; Move this test to the setup time.
##if self.strict and nlink != len(di):
## self.printErr("wrong number of hardlinks %s, %d / %d" % \
## (filename, nlink, len(di)))
# This case also happens e.g. in RHL6.2: procps-2.0.6-5.i386.rpm
# where nlinks is greater than the number of actual hardlinks.
#elif nlink > len(di):
# self.printErr("wrong number of hardlinks %s, %d / %d" % \
# (filename, nlink, len(di)))
if self.strict and mtime != mtime2:
self.printErr("wrong filemtimes for %s" % filename)
if isreg and filesize != self["filesizes"][i] and ishardlink == 0:
self.printErr("wrong filesize for %s" % filename)
if isreg and dev != dev2:
self.printErr("wrong filedevice for %s" % filename)
if self.strict and rdev != rdev2:
self.printErr("wrong filerdevs for %s" % filename)
if S_ISLNK(mode):
if data.rstrip("\x00") != linkto:
self.printErr("wrong filelinkto for %s" % filename)
elif isreg:
if not (filesize == 0 and ishardlink == 1):
ctx = md5.new()
ctx.update(data)
if ctx.hexdigest() != md5sum:
if self["filesizes"][i] != 0 and self["arch"] != "sparc":
self.printErr("wrong filemd5s for %s: %s, %s" \
% (filename, ctx.hexdigest(), md5sum))
def extractCpio(self, filename, datasize, read_data, filenamehash,
devinode, filenames, db):
# pylint: disable-msg=W0612
data = ""
if datasize:
data = read_data(datasize)
fileinfo = filenamehash.get(filename)
if fileinfo == None:
self.printErr("cpio file %s not in rpm header" % filename)
return
(fn, flag, mode, mtime, dev, inode, user, group, rdev,
linkto, md5sum, i) = fileinfo
del filenamehash[filename]
uid = gid = None
if self.owner:
uid = self.uid.ugid[user]
gid = self.gid.ugid[group]
if self.relocated:
filename = self.__relocatedFile(filename)
filename = "%s%s" % (self.buildroot, filename)
dirname = pathdirname(filename)
makeDirs(dirname)
doextract = 1
if db:
try:
(mode2, inode2, dev2, nlink2, uid2, gid2, filesize2,
atime2, mtime2, ctime2) = os.stat(filename)
                # XXX consider reg / non-reg files
                changedfile = 0 # avoid a NameError if the test below fails
                if (flag & RPMFILE_CONFIG) and S_ISREG(mode):
changedfile = 1
# XXX go through db if we find a same file
if changedfile:
if flag & RPMFILE_NOREPLACE:
filename += ".rpmnew"
else:
pass # ln file -> file.rpmorig
else:
pass # XXX higher arch and our package not noarch
#doextract = 0
except OSError:
pass
if not doextract:
return
if S_ISREG(mode):
di = devinode.get((dev, inode, md5sum))
if di == None or data:
(fd, tmpfilename) = mkstemp_file(dirname)
os.write(fd, data)
os.close(fd)
setPerms(tmpfilename, uid, gid, mode, mtime)
os.rename(tmpfilename, filename)
if di:
di.remove(i)
for j in di:
fn2 = filenames[j]
if self.relocated:
fn2 = self.__relocatedFile(fn2)
fn2 = "%s%s" % (self.buildroot, fn2)
dirname = pathdirname(fn2)
makeDirs(dirname)
tmpfilename = mkstemp_link(dirname, tmpprefix, filename)
if tmpfilename == None:
(fd, tmpfilename) = mkstemp_file(dirname)
os.write(fd, data)
os.close(fd)
setPerms(tmpfilename, uid, gid, mode, mtime)
os.rename(tmpfilename, fn2)
del devinode[(dev, inode, md5sum)]
elif S_ISDIR(mode):
makeDirs(filename)
setPerms(filename, uid, gid, mode, None)
elif S_ISLNK(mode):
#if (os.path.islink(filename) and
# os.readlink(filename) == linkto):
# return
tmpfile = mkstemp_symlink(dirname, tmpprefix, linkto)
setPerms(tmpfile, uid, gid, None, None)
os.rename(tmpfile, filename)
elif S_ISFIFO(mode):
tmpfile = mkstemp_mkfifo(dirname, tmpprefix)
setPerms(tmpfile, uid, gid, mode, mtime)
os.rename(tmpfile, filename)
elif S_ISCHR(mode) or S_ISBLK(mode):
if self.owner:
tmpfile = mkstemp_mknod(dirname, tmpprefix, mode, rdev)
setPerms(tmpfile, uid, gid, mode, mtime)
os.rename(tmpfile, filename)
# if not self.owner: we could give a warning here
elif S_ISSOCK(mode):
raise ValueError, "UNIX domain sockets can't be packaged."
else:
raise ValueError, "%s: not a valid filetype" % (oct(mode))
def getFilenames(self):
oldfilenames = self["oldfilenames"]
if oldfilenames != None:
return oldfilenames
basenames = self["basenames"]
if basenames == None:
return []
dirnames = self["dirnames"]
dirindexes = self["dirindexes"]
# python-only
return [ "%s%s" % (dirnames[dirindexes[i]], basenames[i])
for i in xrange(len(basenames)) ]
# python-only-end
# pyrex-code
#ret = []
#for i in xrange(len(basenames)):
# ret.append("%s%s" % (dirnames[dirindexes[i]], basenames[i]))
#return ret
# pyrex-code-end
def readPayload(self, func, filenames=None, extract=None, db=None):
self.__openFd(96 + self.sigdatasize + self.hdrdatasize)
# pylint: disable-msg=W0612
devinode = {} # this will contain possibly hardlinked files
filenamehash = {} # full filename of all files
if filenames == None:
filenames = self.getFilenames()
if filenames:
fileinfo = zip(filenames, self["fileflags"], self["filemodes"],
self["filemtimes"], self["filedevices"], self["fileinodes"],
self["fileusername"], self["filegroupname"], self["filerdevs"],
self["filelinktos"], self["filemd5s"],
xrange(len(self["fileinodes"])))
for (fn, flag, mode, mtime, dev, inode, user, group,
rdev, linkto, md5sum, i) in fileinfo:
if flag & (RPMFILE_GHOST | RPMFILE_EXCLUDE):
continue
filenamehash[fn] = fileinfo[i]
if S_ISREG(mode):
devinode.setdefault((dev, inode, md5sum), []).append(i)
for di in devinode.keys():
if len(devinode[di]) <= 1:
del devinode[di]
# sanity check hardlinks
if self.verify:
for hardlinks in devinode.itervalues():
j = hardlinks[0]
mode = self["filemodes"][j]
mtime = self["filemtimes"][j]
size = self["filesizes"][j]
for j in hardlinks[1:]:
# dev/inode/md5sum are already guaranteed to be the same
if self["filemodes"][j] != mode:
self.printErr("modes differ for hardlink")
if self["filemtimes"][j] != mtime:
self.printErr("mtimes differ for hardlink")
if self["filesizes"][j] != size:
self.printErr("sizes differ for hardlink")
cpiosize = self.sig.getOne("payloadsize")
archivesize = self.hdr.getOne("archivesize")
if archivesize != None:
if cpiosize == None:
cpiosize = archivesize
elif cpiosize != archivesize:
self.printErr("wrong archive size")
size_in_sig = self.sig.getOne("size_in_sig")
if size_in_sig != None:
size_in_sig -= self.hdrdatasize
if self["payloadcompressor"] in [None, "gzip"]:
if size_in_sig != None and size_in_sig >= 8:
size_in_sig -= 8
fd = PyGZIP(self.filename, self.fd, cpiosize, size_in_sig)
#fd = gzip.GzipFile(fileobj=self.fd)
elif self["payloadcompressor"] == "bzip2":
import bz2
if size_in_sig != None:
payload = self.fd.read(size_in_sig)
else:
payload = self.fd.read()
fd = StringIO(bz2.decompress(payload))
else:
self.printErr("unknown payload compression")
return
if self["payloadformat"] not in [None, "cpio"]:
self.printErr("unknown payload format")
return
c = CPIO(self.filename, fd, self.issrc, cpiosize)
if c.readCpio(func, filenamehash, devinode, filenames,
extract, db) == None:
pass # error output is already done
else:
for filename in filenamehash.iterkeys():
self.printErr("file not in cpio: %s" % filename)
if extract and devinode.keys():
self.printErr("hardlinked files remain from cpio")
# python-only
del c, fd
# python-only-end
self.closeFd()
def getSpecfile(self, filenames=None):
fileflags = self["fileflags"]
for i in xrange(len(fileflags)):
if fileflags[i] & RPMFILE_SPECFILE:
return i
if filenames == None:
filenames = self.getFilenames()
for i in xrange(len(filenames)):
if filenames[i].endswith(".spec"):
return i
return None
def getEpoch(self, default="0"):
e = self["epoch"]
if e == None:
return default
return str(e[0])
def getArch(self):
if self.issrc:
return "src"
return self["arch"]
def getNVR(self):
return "%s-%s-%s" % (self["name"], self["version"], self["release"])
def getNVRA(self):
return "%s-%s-%s.%s" % (self["name"], self["version"], self["release"],
self.getArch())
def getNA(self):
return "%s.%s" % (self["name"], self["arch"])
def getEVR(self):
"""Return [%epoch:]%version-%release."""
e = self["epoch"]
if e != None:
return "%d:%s-%s" % (e[0], self["version"], self["release"])
return "%s-%s" % (self["version"], self["release"])
def getNEVRA(self):
"""Return %name-[%epoch:]%version-%release.%arch."""
return "%s-%s.%s" % (self["name"], self.getEVR(), self.getArch())
def getNEVR0(self):
"""Return %name-[%epoch:]%version-%release."""
return "%s-%s:%s-%s" % (self["name"], self.getEpoch(),
self["version"], self["release"])
def getNEVRA0(self):
"""Return %name-[%epoch:]%version-%release.%arch."""
return "%s-%s:%s-%s.%s" % (self["name"], self.getEpoch(),
self["version"], self["release"], self.getArch())
def getFilename(self):
if opensuse and self.issrc and self["nosource"] != None:
return "%s-%s-%s.nosrc.rpm" % (self["name"], self["version"],
self["release"])
return "%s-%s-%s.%s.rpm" % (self["name"], self["version"],
self["release"], self.getArch())
def getFilename2(self):
return "%s-%s-%s.%s" % (self["name"], self["version"],
self["release"], self.getArch())
def __verifyDeps(self, name, flags, version):
n = self[name]
f = self[flags]
v = self[version]
if n == None:
if f != None or v != None:
self.printErr("wrong dep data")
else:
if (f == None and v != None) or (f != None and v == None):
self.printErr("wrong dep data")
if f == None:
f = [None] * len(n)
if v == None:
v = [None] * len(n)
if len(n) != len(f) or len(f) != len(v):
self.printErr("wrong length of deps for %s" % name)
def _getDeps(self, name, flags, version):
n = self[name]
if n == None:
return []
f = self[flags]
v = self[version]
if f == None:
f = [None] * len(n)
if v == None:
v = [None] * len(n)
return zip(n, f, v)
def getProvides(self):
provs = self._getDeps("providename", "provideflags", "provideversion")
if not self.issrc:
provs.append( (self["name"], RPMSENSE_EQUAL, self.getEVR()) )
return provs
def addProvides(self, phash):
for (name, flag, version) in self.getProvides():
phash.setdefault(name, []).append((flag, version, self))
def removeProvides(self, phash):
for (name, flag, version) in self.getProvides():
phash[name].remove((flag, version, self))
def getRequires(self):
return self._getDeps("requirename", "requireflags", "requireversion")
def getObsoletes(self):
return self._getDeps("obsoletename", "obsoleteflags",
"obsoleteversion")
def getConflicts(self):
return self._getDeps("conflictname", "conflictflags",
"conflictversion")
def addDeps(self, name, flag, version, phash):
for (n, f, v) in self._getDeps(name, flag, version):
phash.setdefault((n, f, v), []).append(self)
def removeDeps(self, name, flag, version, phash):
for (n, f, v) in self._getDeps(name, flag, version):
phash[(n, f, v)].remove(self)
def getTriggers(self):
deps = self._getDeps("triggername", "triggerflags", "triggerversion")
index = self["triggerindex"]
scripts = self["triggerscripts"]
progs = self["triggerscriptprog"]
if self.verify:
if deps == []:
if index != None or scripts != None or progs != None:
self.printErr("wrong triggers still exist")
else:
if len(scripts) != len(progs):
self.printErr("wrong triggers")
if index == None:
if len(deps) != len(scripts):
self.printErr("wrong triggers")
else:
if len(deps) != len(index):
self.printErr("wrong triggers")
# python-only
if index == None:
return [ (deps[i][0], deps[i][1], deps[i][2], progs[i], scripts[i])
for i in xrange(len(deps)) ]
return [ (deps[i][0], deps[i][1], deps[i][2], progs[index[i]],
scripts[index[i]]) for i in xrange(len(deps)) ]
# python-only-end
# pyrex-code
# return []
# pyrex-code-end
def genSigHeader(self):
"""Take data from the signature header and append it to the hdr."""
for (sig, hdr) in headermatch:
if self[hdr] != None and self.sig[sig] == None:
self.sig[sig] = self[hdr]
def genRpmdbHeader(self):
"""Take the rpmdb header data to again create a signature header."""
for (sig, hdr) in headermatch:
if self.sig[sig] != None and self[hdr] == None:
self[hdr] = self.sig[sig]
def isInstallonly(self):
"""Can several packages be installed at the same time or should this
rpm be normally installed only once?"""
if (self["name"] in installonlypkgs or
self["name"].startswith("kernel-module") or
"kernel-modules" in self.hdr.get("providename", [])):
return 1
return 0
def buildOnArch(self, arch):
# do not build if this arch is in the exclude list
exclude = self["excludearch"]
if exclude and arch in exclude:
return None
# do not build if this arch is not in the exclusive list
exclusive = self["exclusivearch"]
if exclusive and arch not in exclusive:
return None
# return 2 if this will build into a "noarch" rpm
if self["buildarchs"] == ["noarch"]:
return 2
# otherwise build this rpm normally for this arch
return 1
def getChangeLog(self, num=-1, newer=None):
""" Return the changlog entry in one string. """
ctext = self["changelogtext"]
if not ctext:
return ""
cname = self["changelogname"]
ctime = self["changelogtime"]
if num == -1 or num > len(ctext):
num = len(ctext)
data = []
for i in xrange(num):
if newer != None and ctime[i] <= newer:
break
data.append("* %s %s\n%s\n\n" % (time.strftime("%a %b %d %Y",
time.gmtime(ctime[i])), cname[i], ctext[i]))
return "".join(data)
def __verifyWriteHeader(self, hdrhash, taghash, region, hdrdata,
useinstall, rpmgroup):
(indexNo, storeSize, fmt, fmt2) = writeHeader(None, hdrhash, taghash,
region, {}, useinstall, rpmgroup)
if (indexNo, storeSize, fmt, fmt2) != (hdrdata[0], hdrdata[1],
hdrdata[3], hdrdata[4]):
self.printErr("(rpm-%s) writeHeader() would write a different " \
"normal header" % self["rpmversion"])
def getImmutableRegion(self):
"""rpmdb data has the original header data and then adds some items
from the signature header and some other info about the installed
package. This routine tries to get the unmodified data of the
original rpm header."""
# "immutable1" is set for old rpm headers for the entry in rpmdb.
if self["immutable1"] != None:
(tag, ttype, offset, count) = unpack("!4I", self.hdrdata[3][0:16])
if tag != 61 or ttype != RPM_BIN or count != 16:
return None
storeSize = offset
(tag, ttype, offset, count) = unpack("!2IiI",
self.hdrdata[4][offset:offset + 16])
if (tag != 61 or (-offset % 16 != 0) or
ttype != RPM_BIN or count != 16):
return None
indexNo = (-offset - 16) // 16
fmt = self.hdrdata[3][16:(indexNo + 1) * 16]
fmt2 = self.hdrdata[4][:storeSize]
return (indexNo, storeSize, fmt, fmt2)
if self["immutable"] == None:
return None
(tag, ttype, offset, count) = unpack("!4I", self.hdrdata[3][0:16])
if tag != rpmtag["immutable"][0] or ttype != RPM_BIN or count != 16:
return None
storeSize = offset + 16
(tag, ttype, offset, count) = unpack("!2IiI",
self.hdrdata[4][offset:storeSize])
if (tag != rpmtag["immutable"][0] or (-offset % 16 != 0) or
ttype != RPM_BIN or count != 16):
return None
indexNo = -offset // 16
fmt = self.hdrdata[3][:indexNo * 16]
fmt2 = self.hdrdata[4][:storeSize]
return (indexNo, storeSize, fmt, fmt2)
def __doVerify(self):
if self.rpmgroup not in (None, RPM_STRING, RPM_I18NSTRING):
self.printErr("rpmgroup out of range")
self.__verifyWriteHeader(self.hdr.hash, rpmtag,
"immutable", self.hdrdata, 1, self.rpmgroup)
if self.strict:
self.__verifyWriteHeader(self.sig.hash, rpmsigtag,
"header_signatures", self.sigdata, 0, None)
# disable the utf-8 test per default, should check against self.verbose:
if self.strict and not opensuse:
for i in ("summary", "description", "changelogtext"):
if self[i] == None:
continue
for j in self[i]:
try:
j.decode("utf-8")
except UnicodeDecodeError:
self.printErr("not utf-8 in %s" % i)
#self.printErr("text: %s" % j)
break
if not self.issrc and (self.strict and
self["name"][:6] == "kernel" and
self["name"] not in ("kernel-utils", "kernel-doc",
"kernel-pcmcia-cs", "kernel-debuginfo", "kernel-ib",
"kernel-headers") and not self.isInstallonly()):
self.printErr("possible kernel rpm")
for i in ("md5",):
if not self.sig.has_key(i):
self.printErr("sig header is missing: %s" % i)
for i in ("name", "version", "release", "arch", "rpmversion"):
if not self.hdr.has_key(i):
self.printErr("hdr is missing: %s" % i)
size_in_sig = self.sig.getOne("size_in_sig")
if size_in_sig != None and not isUrl(self.filename):
rpmsize = os.stat(self.filename).st_size
if rpmsize != 96 + self.sigdatasize + size_in_sig:
self.printErr("wrong size in rpm package: %d / %d" % \
(rpmsize, 96 + self.sigdatasize + size_in_sig))
filenames = self.getFilenames()
if self.issrc:
i = self.getSpecfile(filenames)
if i == None:
self.printErr("no specfile found in src.rpm")
else:
if self.strict and not filenames[i].endswith(".spec"):
self.printErr("specfile does not end with .spec")
#if self.strict and filenames[i] != self["name"] + ".spec":
# self.printErr("specfile not using default name: %s" % \
# filenames[i])
if self["sourcerpm"] != None:
self.printErr("binary rpm does contain sourcerpm tag")
else:
if self["sourcerpm"] == None:
self.printErr("source rpm does not contain sourcerpm tag")
if self["triggerscripts"] != None:
if len(self["triggerscripts"]) != len(self["triggerscriptprog"]):
self.printErr("wrong trigger lengths")
if self.strict:
for i in ("-", ":"):
if i in self["version"] or i in self["release"]:
self.printErr("version/release contains wrong char")
for i in (",", " ", "\t"):
if (i in self["name"] or i in self["version"] or
i in self["release"]):
self.printErr("name/version/release contains wrong char")
matchhash = re.compile("^[a-z0-9]{32}$")
for i in self.hdr.get("provideversion", []) + \
self.hdr.get("requireversion", []) + \
self.hdr.get("obsoleteversion", []) + \
self.hdr.get("conflictversion", []):
j = i.find(":")
if (j != -1 and not i[:j].isdigit()) or i.count(":") > 1:
self.printErr("wrong char ':' in deps")
if " " in i or "," in i or "\t" in i:
self.printErr("wrong char [ ,\\t] in deps")
if i.count("-") >= 2:
self.printErr("too many '-' in deps")
if i[:1] and not i[:1].isdigit() and not matchhash.match(i) \
and i != "%s-%s" % (self["version"], self["release"]):
self.printErr("dependency version starts " +
"with non-digit: %s" % i)
if "%" in i:
self.printErr("dependency version contains %%: %s" % i)
if self["payloadformat"] not in [None, "cpio", "drpm"]:
self.printErr("wrong payload format %s" % self["payloadformat"])
if self.strict:
if opensuse:
if self["payloadcompressor"] not in [None, "gzip", "bzip2"]:
self.printErr("no gzip/bzip2 compressor: %s" % \
self["payloadcompressor"])
else:
if self["payloadcompressor"] not in [None, "gzip"]:
self.printErr("no gzip compressor: %s" % \
self["payloadcompressor"])
else:
if self["payloadcompressor"] not in [None, "gzip", "bzip2"]:
self.printErr("no gzip/bzip2 compressor: %s" % \
self["payloadcompressor"])
if self.strict and self["payloadflags"] not in ["9"]:
self.printErr("no payload flags: %s" % self["payloadflags"])
if self.strict and self["os"] not in ["Linux", "linux"]:
self.printErr("bad os: %s" % self["os"])
elif self["os"] not in ["Linux", "linux", "darwin"]:
self.printErr("bad os: %s" % self["os"])
if self.strict:
if opensuse:
if self["packager"] not in ("http://bugs.opensuse.org",):
self.printErr("unknown packager: %s" % self["packager"])
if self["vendor"] not in (
"SUSE LINUX Products GmbH, Nuernberg, Germany",):
self.printErr("unknown vendor: %s" % self["vendor"])
else:
if self["packager"] not in (None, "Koji",
"Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>",
"Fedora Project <http://bugzilla.redhat.com/bugzilla>",
"Fedora Project",
"Matthias Saou <matthias@rpmforge.net>"):
self.printErr("unknown packager: %s" % self["packager"])
if self["vendor"] not in (None, "Red Hat, Inc.", "Koji",
"Fedora Project", "Livna.org RPMS", "Freshrpms.net"):
self.printErr("unknown vendor: %s" % self["vendor"])
if self["distribution"] not in (None, "Red Hat",
"Red Hat Linux", "Red Hat FC-3", "Red Hat (FC-3)",
"Red Hat (FC-4)", "Red Hat (FC-5)", "Red Hat (FC-6)",
"Red Hat (FC-7)", "Fedora Extras", "Red Hat (scratch)",
"Red Hat (RHEL-3)", "Red Hat (RHEL-4)",
"Red Hat (RHEL-5)", "Unknown"):
self.printErr("unknown distribution: %s" % \
self["distribution"])
arch = self["arch"]
if self["rhnplatform"] not in (None, arch):
self.printErr("unknown arch for rhnplatform")
if self.strict:
if os.path.basename(self.filename) != self.getFilename():
self.printErr("bad filename: %s/%s" % (self.filename,
self.getFilename()))
if opensuse:
if self["platform"] not in (None, "",
arch + "-suse-linux", "noarch-suse-linux"):
self.printErr("unknown arch %s" % self["platform"])
elif self["platform"] not in (None, "", arch + "-redhat-linux-gnu",
arch + "-redhat-linux", "--target=${target_platform}",
arch + "-unknown-linux",
"--target=${TARGET_PLATFORM}", "--target=$TARGET_PLATFORM"):
self.printErr("unknown arch %s" % self["platform"])
if self["exclusiveos"] not in (None, ["Linux"], ["linux"]):
self.printErr("unknown os %s" % self["exclusiveos"])
if self.strict:
if self["buildarchs"] not in (None, ["noarch"]):
self.printErr("bad buildarch: %s" % self["buildarchs"])
if self["excludearch"] != None:
for i in self["excludearch"]:
if i not in possible_archs:
self.printErr("new possible arch %s" % i)
if self["exclusivearch"] != None:
for i in self["exclusivearch"]:
if i not in possible_archs:
self.printErr("new possible arch %s" % i)
for (s, p) in (("prein", "preinprog"), ("postin", "postinprog"),
("preun", "preunprog"), ("postun", "postunprog"),
("verifyscript", "verifyscriptprog")):
(script, prog) = (self[s], self[p])
if script != None and prog == None:
self.printErr("no prog")
if self.strict:
if ((not isinstance(prog, basestring) and prog != None) or
prog not in possible_scripts):
self.printErr("unknown prog: %s" % prog)
if script == None and prog == "/bin/sh" and not opensuse:
self.printErr("empty script: %s" % s)
if script != None and isCommentOnly(script):
self.printErr("empty(2) script: %s" % s)
# some verify tests are also in these functions:
for (n, f, v) in (("providename", "provideflags", "provideversion"),
("requirename", "requireflags", "requireversion"),
("obsoletename", "obsoleteflags", "obsoleteversion"),
("conflictname", "conflictflags", "conflictversion"),
("triggername", "triggerflags", "triggerversion")):
self.__verifyDeps(n, f, v)
if not self.issrc:
provs = self._getDeps("providename", "provideflags",
"provideversion")
mydep = (self["name"], RPMSENSE_EQUAL, self.getEVR())
ver = self["rpmversion"]
# AS2.1 still has compat rpms which need this:
if ver != None and ver[:4] < "4.3." and mydep not in provs:
provs.append(mydep)
if mydep not in provs:
self.printErr("no provides for own rpm package, rpm=%s" % ver)
self.getTriggers()
# Check for /tmp/ and /usr/src in the provides:
if self.strict and self["providename"]:
for n in self["providename"]:
if n.find("/tmp/") != -1 or n.find("/usr/src") != -1:
self.printErr("suspicous provides: %s" % n)
# check file* tags to be consistent:
reqfiletags = ["fileusername", "filegroupname", "filemodes",
"filemtimes", "filedevices", "fileinodes", "filesizes",
"filemd5s", "filerdevs", "filelinktos", "fileflags"]
filetags = ["fileverifyflags", "filelangs", "filecolors", "fileclass",
"filedependsx", "filedependsn"]
x = self[reqfiletags[0]]
lx = None
if x != None:
lx = len(x)
for t in reqfiletags:
if self[t] == None or len(self[t]) != lx:
self.printErr("wrong length for tag %s" % t)
for t in filetags:
if self[t] != None and len(self[t]) != lx:
self.printErr("wrong length for tag %s" % t)
else:
for t in reqfiletags[:] + filetags[:]:
if self[t] != None:
self.printErr("non-None tag %s" % t)
if self["oldfilenames"]:
if (self["dirindexes"] != None or
self["dirnames"] != None or
self["basenames"] != None):
self.printErr("new filetag still present")
if lx != len(self["oldfilenames"]):
self.printErr("wrong length for tag oldfilenames")
elif self["dirindexes"]:
if (len(self["dirindexes"]) != lx or len(self["basenames"]) != lx
or self["dirnames"] == None):
self.printErr("wrong length for file* tag")
# Would genBasenames() generate the same output?
if (self["basenames"], list(self["dirindexes"]),
self["dirnames"]) != genBasenames(filenames):
self.printErr("dirnames/dirindexes is generated differently")
filemodes = self["filemodes"]
filemd5s = self["filemd5s"]
fileflags = self["fileflags"]
if filemodes:
for x in xrange(len(filemodes)):
if fileflags[x] & RPMFILE_EXCLUDE:
self.printErr("exclude flag set in rpm")
if fileflags[x] & (RPMFILE_GHOST | RPMFILE_EXCLUDE):
continue
if S_ISREG(filemodes[x]):
                    # All regular files except 0-sized files must have
                    # an md5sum.
if not filemd5s[x] and self["filesizes"][x] != 0:
self.printErr("missing filemd5sum, %d, %s" % (x,
filenames[x]))
elif filemd5s[x] != "":
print filemd5s[x]
self.printErr("non-regular file has filemd5sum")
# Verify count/flags for rpmheader tags.
for (indexNo, fmt, dorpmtag) in ((self.hdrdata[0], self.hdrdata[3],
rpmtag), (self.sigdata[0], self.sigdata[3], rpmsigtag)):
for i in xrange(0, indexNo * 16, 16):
(tag, ttype, offset, count) = unpack("!4I", fmt[i:i + 16])
t = dorpmtag[tag]
if t[2] != None and t[2] != count:
self.printErr("tag %d has wrong count %d" % (tag, count))
if self.strict and (t[3] & 1):
if not opensuse or tag not in (1012, 1013, 1051, 1052,
1123, 1152, 1154, 1156, 1157, 1158, 1159, 1160, 1161):
self.printErr("tag %d is old" % tag)
if self.issrc:
if (t[3] & 4):
self.printErr("tag %d should be for binary rpms" % tag)
else:
if (t[3] & 2):
self.printErr("tag %d should be for src rpms" % tag)
# Verify region headers have sane data. We do not support more than
# one region header at this point.
if self["immutable"] != None:
(tag, ttype, offset, count) = unpack("!4I", self.hdrdata[3][0:16])
if tag != rpmtag["immutable"][0] or ttype != RPM_BIN or count != 16:
self.printErr("region tag not at the beginning of the header")
elif offset + 16 != self.hdrdata[1]:
self.printErr("wrong length of tag header detected")
for (data, regiontag) in ((self["immutable"], rpmtag["immutable"][0]),
(self.sig["header_signatures"], rpmsigtag["header_signatures"][0])):
if data == None:
continue
(tag, ttype, offset, count) = unpack("!2IiI", data)
if tag != regiontag or ttype != RPM_BIN or count != 16:
self.printErr("region has wrong tag/type/count")
if -offset % 16 != 0:
self.printErr("region has wrong offset")
if (regiontag == rpmtag["immutable"][0] and
-offset // 16 != self.hdrdata[0]):
self.printErr("region tag %s only for partial header: %d, %d" \
% (regiontag, self.hdrdata[0], -offset // 16))
if self.nodigest:
return 0
# sha1 of the header
sha1header = self.sig["sha1header"]
if sha1header:
ctx = sha1.new()
ctx.update(self.hdrdata[2])
ctx.update(self.hdrdata[3])
ctx.update(self.hdrdata[4])
if ctx.hexdigest() != sha1header:
self.printErr("wrong sha1: %s / %s" % (sha1header,
ctx.hexdigest()))
return 1
# md5sum of header plus payload
md5sum = self.sig["md5"]
if md5sum:
ctx = md5.new()
ctx.update(self.hdrdata[2])
ctx.update(self.hdrdata[3])
ctx.update(self.hdrdata[4])
data = self.fd.read(65536)
while data:
ctx.update(data)
data = self.fd.read(65536)
# make sure we re-open this file if we read the payload
self.closeFd()
if ctx.digest() != md5sum:
from binascii import b2a_hex
self.printErr("wrong md5: %s / %s" % (b2a_hex(md5sum),
ctx.hexdigest()))
return 1
return 0
def unlinkRpmdbCache(dbpath):
for i in xrange(9):
try:
os.unlink(dbpath + "__db.00%d" % i)
except OSError:
pass
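# This removes the Berkeley DB environment files __db.000 up to __db.008
# next to the rpmdb; these shared-memory region files can go stale and
# then block access to the database.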
def readReleaseVer(distroverpkg, buildroot="", rpmdbpath="/var/lib/rpm/"):
"""Search for distroverpkg within the Provides: of the rpmdb.
Return with the very first entry found."""
import bsddb
dbpath = buildroot + rpmdbpath
unlinkRpmdbCache(dbpath) # XXX needed for read-only access???
providename_db = None # XXX Create a class to access/cache access?
packages_db = None
for rpmname in distroverpkg:
if providename_db == None:
providename_db = bsddb.hashopen(dbpath + "Providename", "r")
data = providename_db.get(rpmname, "") # pylint: disable-msg=E1101
for i in xrange(0, len(data), 8):
if packages_db == None:
packages_db = bsddb.hashopen(dbpath + "Packages", "r")
data1 = packages_db.get(data[i:i + 4]) # pylint: disable-msg=E1101
if data1:
fd = StringIO(data1)
pkg = ReadRpm("rpmdb", fd=fd)
pkg.readHeader(None, versiontag, rpmdb=1)
return pkg["version"]
return None
class RpmDB:
#zero = pack("I", 0)
def __init__(self, buildroot="", rpmdbpath="/var/lib/rpm/"):
self.buildroot = buildroot
self.rpmdbpath = rpmdbpath
self._pkgs = {}
self.openDB4()
def openDB4(self):
import bsddb
dbpath = self.buildroot + self.rpmdbpath
makeDirs(dbpath)
unlinkRpmdbCache(dbpath)
flag = "c"
self.basenames_db = bsddb.hashopen(dbpath + "Basenames", flag)
self.conflictname_db = bsddb.hashopen(dbpath + "Conflictname", flag)
self.dirnames_db = bsddb.btopen(dbpath + "Dirnames", flag)
self.filemd5s_db = bsddb.hashopen(dbpath + "Filemd5s", flag)
self.group_db = bsddb.hashopen(dbpath + "Group", flag)
self.installtid_db = bsddb.btopen(dbpath + "Installtid", flag)
self.name_db = bsddb.hashopen(dbpath + "Name", flag)
self.packages_db = bsddb.hashopen(dbpath + "Packages", flag)
self.providename_db = bsddb.hashopen(dbpath + "Providename", flag)
self.provideversion_db = bsddb.btopen(dbpath + "Provideversion", flag)
self.requirename_db = bsddb.hashopen(dbpath + "Requirename", flag)
self.requireversion_db = bsddb.btopen(dbpath + "Requireversion", flag)
self.sha1header_db = bsddb.hashopen(dbpath + "Sha1header", flag)
self.sigmd5_db = bsddb.hashopen(dbpath + "Sigmd5", flag)
self.triggername_db = bsddb.hashopen(dbpath + "Triggername", flag)
#def getPkgById(self, id2):
# if self._pkgs.has_key(id2):
# return self._pkgs[id2]
# else:
# #pkg = self.readRpm(id2, self.packages_db, self.tags)
# #if pkg is not None:
# # self._pkgs[id2] = pkg
# #return pkg
# return None
def searchFilenames(self, filename):
(dirname, basename) = pathsplit2(filename)
data1 = self.basenames_db.get(basename, "") # pylint: disable-msg=E1101
data2 = self.dirnames_db.get(dirname, "") # pylint: disable-msg=E1101
dirname_ids = {}
for i in xrange(0, len(data2), 8):
id_ = data2[i:i + 4]
dirname_ids[id_] = None
result = []
for i in xrange(0, len(data1), 8):
id_ = data1[i:i + 4]
if id_ not in dirname_ids:
continue
idx = unpack("I", data1[i + 4:i + 8])[0]
#pkg = self.getPkgById(id_)
#if pkg and pkg.iterFilenames()[idx] == filename:
# result.append(pkg)
result.append( (id_, idx) )
return result
def readRpm(filenames, sigtag, tag):
rpms = []
for filename in filenames:
rpm = ReadRpm(filename)
if rpm.readHeader(sigtag, tag):
print "Cannot read %s.\n" % filename
continue
rpm.closeFd()
rpms.append(rpm)
return rpms
def verifyRpm(filename, verify, strict, payload, nodigest, hdrtags, keepdata,
headerend):
"""Read in a complete rpm and verify its integrity."""
rpm = ReadRpm(filename, verify, strict=strict, nodigest=nodigest)
if not nodigest or payload:
headerend = None
if rpm.readHeader(rpmsigtag, hdrtags, keepdata, headerend=headerend):
return None
if payload:
rpm.readPayload(rpm.verifyCpio)
rpm.closeFd()
return rpm
def extractRpm(filename, buildroot, owner=None, db=None):
"""Extract a rpm into a directory."""
if isinstance(filename, basestring):
rpm = ReadRpm(filename)
if rpm.readHeader(rpmsigtag, rpmtag):
return None
else:
rpm = filename
rpm.buildroot = buildroot
if rpm.issrc:
if buildroot[-1:] != "/" and buildroot != "":
buildroot += "/"
else:
buildroot = buildroot.rstrip("/")
rpm.buildroot = buildroot
rpm.setOwner(owner)
rpm.readPayload(rpm.extractCpio, extract=1, db=db)
def sameSrcRpm(a, b):
# Packages with the same md5sum for the payload are the same.
amd5sum = a.sig["md5"]
if amd5sum != None and amd5sum == b.sig["md5"]:
return 1
# Check if all regular files are the same in both packages.
amd5s = []
for (md5sum, name, mode) in zip(a["filemd5s"], a.getFilenames(),
a["filemodes"]):
if S_ISREG(mode):
amd5s.append((md5sum, name))
amd5s.sort()
bmd5s = []
for (md5sum, name, mode) in zip(b["filemd5s"], b.getFilenames(),
b["filemodes"]):
if S_ISREG(mode):
bmd5s.append((md5sum, name))
bmd5s.sort()
return amd5s == bmd5s
def ignoreBinary():
return "\.cin$\n\.ogg$\n\.gz$\n\.tgz$\n\.tar$\n\.taz$\n\.tbz$\n\.bz2$\n" \
"\.z$\n\.Z$\n\.zip$\n\.ttf$\n\.db$\n\.jar$\n\.pdf$\n\.sdf$\n\.war$\n" \
"\.gsi$\n\.uqm$\n\.weight$\n\.ps$\n"
def isBinary(filename):
for i in (".cin", ".ogg", ".gz", ".tgz", ".tar", ".taz", ".tbz", ".bz2",
".z", ".Z", ".zip", ".ttf", ".db", ".jar", ".pdf", ".sdf", ".war",
".gsi", ".uqm", ".weight", ".ps"):
if filename.endswith(i):
return 1
return 0
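# Example: isBinary("foo-1.0.tar.bz2") returns 1 (matches ".bz2"),
# while isBinary("foo.spec") returns 0.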
def explodeFile(filename, dirname, version):
if filename.endswith(".tar.gz"):
explode = "z"
dirn = filename[:-7]
elif filename.endswith(".tar.bz2"):
explode = "j"
dirn = filename[:-8]
else:
return
newdirn = dirn
if newdirn.endswith(version):
newdirn = newdirn[:- len(version)]
while newdirn[-1] in "-_.0123456789":
newdirn = newdirn[:-1]
os.system("cd " + dirname + " && { tar x" + explode + "f " + filename \
+ "; for i in * ; do test -d \"$i\" && mv \"$i\" " + newdirn \
+ "; done; }")
return newdirn
delim = "--- -----------------------------------------------------" \
"---------------------\n"
def diffTwoSrpms(oldsrpm, newsrpm, explode=None):
from commands import getoutput
ret = ""
# If they are identical don't output anything.
if oldsrpm == newsrpm:
return ret
orpm = ReadRpm(oldsrpm)
if orpm.readHeader(rpmsigtag, rpmtag):
return ret
nrpm = ReadRpm(newsrpm)
if nrpm.readHeader(rpmsigtag, rpmtag):
return ret
if sameSrcRpm(orpm, nrpm):
return ret
ret = ret + delim
ret = ret + "--- Look at changes from "
if orpm["name"] != nrpm["name"]:
ret = ret + os.path.basename(oldsrpm) + " to " + \
os.path.basename(newsrpm) + ".\n"
else:
ret = ret + orpm["name"] + " " + orpm["version"] + "-" + \
orpm["release"] + " to " + nrpm["version"] + "-" + \
nrpm["release"] + ".\n"
obuildroot = orpm.buildroot = mkstemp_dir(tmpdir) + "/"
nbuildroot = nrpm.buildroot = mkstemp_dir(tmpdir) + "/"
sed1 = "sed 's#^--- " + obuildroot + "#--- #'"
sed2 = "sed 's#^+++ " + nbuildroot + "#+++ #'"
sed = sed1 + " | " + sed2
extractRpm(orpm, obuildroot)
ofiles = orpm.getFilenames()
ospec = orpm.getSpecfile(ofiles)
extractRpm(nrpm, nbuildroot)
nfiles = nrpm.getFilenames()
nspec = nrpm.getSpecfile(nfiles)
# Search identical files and remove them. Also remove/explode
# old binary files.
for f in xrange(len(ofiles)):
if ofiles[f] not in nfiles:
if isBinary(ofiles[f]):
if explode:
explodeFile(ofiles[f], obuildroot, orpm["version"])
ret = ret + "--- " + ofiles[f] + " is removed\n"
os.unlink(obuildroot + ofiles[f])
continue
g = nfiles.index(ofiles[f])
if (orpm["filemd5s"][f] == nrpm["filemd5s"][g] and
f != ospec and g != nspec):
os.unlink(obuildroot + ofiles[f])
os.unlink(nbuildroot + nfiles[g])
# Search new binary files.
for f in nfiles:
if not isBinary(f) or f in ofiles:
continue
if explode:
explodeFile(f, nbuildroot, nrpm["version"])
ret = ret + "--- " + f + " is added\n"
os.unlink(nbuildroot + f)
# List all old and new files.
ret = ret + "old:\n"
ret = ret + getoutput("ls -l " + obuildroot)
ret = ret + "\nnew:\n"
ret = ret + getoutput("ls -l " + nbuildroot)
ret = ret + "\n"
# Generate the diff for the spec file first.
if ospec != None and nspec != None:
ospec = obuildroot + ofiles[ospec]
nspec = nbuildroot + nfiles[nspec]
ret = ret + getoutput("diff -u " + ospec + " " + nspec + " | " + sed)
os.unlink(ospec)
os.unlink(nspec)
# Diff the rest.
ret = ret + getoutput("diff -urN " + obuildroot + " " + nbuildroot + \
" | " + sed)
os.system("rm -rf " + obuildroot + " " + nbuildroot)
return ret
def TreeDiff(dir1, dir2):
import glob
new = []
changed = []
files2 = os.listdir(dir2)
files2.sort()
for f in files2:
# only look at .rpm files
if f[-4:] != ".rpm":
continue
# continue if the same file already existed
if os.path.exists("%s/%s" % (dir1, f)):
continue
# read the new rpm header
rpm = ReadRpm("%s/%s" % (dir2, f))
if rpm.readHeader(rpmsigtag, rpmtag):
print "Cannot read %s.\n" % f # XXX traceback instead of print?
continue
# Is there a previous rpm?
oldf = glob.glob("%s/%s*" % (dir1, rpm["name"]))
if not oldf:
# No previous, so list this as new package.
new.append("New package %s\n\t%s\n" % (rpm["name"],
rpm["summary"][0]))
else:
# Output the new changes:
orpm = ReadRpm(oldf[0])
if orpm.readHeader(rpmsigtag, rpmtag):
print "Cannot read %s.\n" % oldf[0]
continue
(changelognum, changelogtime) = getChangeLogFromRpm(rpm, orpm)
clist = "\n"
if changelognum != -1 or changelogtime != None:
clist = rpm.getChangeLog(changelognum, changelogtime)
nvr = rpm.getNVR()
changed.append("%s (from %s-%s)\n%s\n%s" % (nvr, orpm["version"],
orpm["release"], "-" * len(nvr), clist))
# List all removed packages:
removed = []
files1 = os.listdir(dir1)
files1.sort()
for f in files1:
# only look at .rpm files
if f[-4:] != ".rpm":
continue
# continue if the same file still exists
if os.path.exists("%s/%s" % (dir2, f)):
continue
# read the old rpm header
rpm = ReadRpm("%s/%s" % (dir1, f))
if rpm.readHeader(rpmsigtag, rpmtag):
print "Cannot read %s.\n" % f
continue
# Is there a new rpm?
if not glob.glob("%s/%s*" % (dir2, rpm["name"])):
removed.append("Removed package %s\n" % rpm["name"])
if not changed:
changed = ["(none)",]
return "".join(("\n".join(new), "\n\n", "\n".join(removed),
"\n\nUpdated Packages:\n\n", "".join(changed)))
class HashList:
""" hash list """
def __init__(self):
self.list = []
self.hash = {}
self.__len__ = self.list.__len__
self.__repr__ = self.list.__repr__
self.index = self.list.index
self.has_key = self.hash.has_key
self.keys = self.hash.keys
self.get = self.hash.get
def __getitem__(self, key):
if isinstance(key, IntType):
return self.list[key]
return self.hash.get(key)
def __contains__(self, key):
if isinstance(key, IntType):
return self.list.__contains__(key)
return self.hash.__contains__(key)
def __setitem__(self, key, value):
if not self.hash.has_key(key):
self.list.append(key)
self.hash[key] = value
return value
def __delitem__(self, key):
if self.hash.has_key(key):
del self.hash[key]
self.list.remove(key)
return key
return None
def pop(self, idx):
key = self.list.pop(idx)
del self.hash[key]
return key
def add(self, key, value):
if not key in self:
self[key] = []
self[key].append(value)
def extend(self, key, value):
if not key in self:
self[key] = []
self[key].extend(value)
def remove(self, key, value):
l = self[key]
l.remove(value)
if len(l) == 0:
del self[key]
def setdefault(self, key, defvalue):
if not self.has_key(key):
self[key] = defvalue
return self[key]
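# HashList keeps insertion order in addition to hashed lookup, e.g.:
#   h = HashList()
#   h["b"] = 1
#   h["a"] = 2
#   # now h[0] == "b" (first key), h["a"] == 2 and len(h) == 2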
# Exact reimplementation of glibc's bsearch algorithm. Used by /bin/rpm to
# generate dirnames, dirindexes and basenames from oldfilenames (and we want
# to do it the same way).
def bsearch(key, list2):
l = 0
u = len(list2)
while l < u:
idx = (l + u) // 2
r = cmp(key, list2[idx])
if r < 0:
u = idx
elif r > 0:
l = idx + 1
else:
return idx
return -1
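# Example: bsearch("b", ["a", "b", "c"]) == 1, while
# bsearch("x", ["a", "b", "c"]) == -1 (key not found).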
def pathsplit(filename):
i = filename.rfind("/") + 1
return (filename[:i].rstrip("/") or "/", filename[i:])
#return os.path.split(filename)
def pathdirname(filename):
j = filename.rfind("/") + 1
return filename[:j].rstrip("/") or "/"
#return pathsplit(filename)[0]
#return os.path.dirname(filename)
def pathsplit2(filename):
i = filename.rfind("/") + 1
return (filename[:i], filename[i:])
#(dirname, basename) = os.path.split(filename)
#if dirname[-1:] != "/" and dirname != "":
# dirname += "/"
#return (dirname, basename)
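# Examples for the path helpers above:
#   pathsplit("/usr/bin/ls") == ("/usr/bin", "ls")
#   pathdirname("/usr/bin/ls") == "/usr/bin"
#   pathsplit2("/usr/bin/ls") == ("/usr/bin/", "ls")
# pathsplit2() keeps the trailing slash, matching the convention of the
# rpm "dirnames" tag.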
def genBasenames(oldfilenames):
"""Split oldfilenames into basenames, dirindexes, dirnames. Do this
exactly like /bin/rpm does. A faster version would cache the last result
and also use "dirindex = dirnames.index(dirname)", but we use this only
to verify rpm packages until now."""
(basenames, dirindexes, dirnames) = ([], [], [])
for filename in oldfilenames:
(dirname, basename) = pathsplit2(filename)
dirindex = bsearch(dirname, dirnames)
if dirindex < 0:
dirindex = len(dirnames)
dirnames.append(dirname)
basenames.append(basename)
dirindexes.append(dirindex)
return (basenames, dirindexes, dirnames)
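# A small sketch, assuming sorted input (rpm stores filenames sorted):
#   genBasenames(["/bin/ls", "/etc/group", "/etc/passwd"])
#   == (["ls", "group", "passwd"], [0, 1, 1], ["/bin/", "/etc/"])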
def genBasenames2(oldfilenames):
(basenames, dirnames) = ([], [])
for filename in oldfilenames:
(dirname, basename) = pathsplit2(filename)
basenames.append(basename)
dirnames.append(dirname)
return (basenames, dirnames)
class FilenamesList:
"""A mapping from filenames to rpm packages."""
def __init__(self, checkfileconflicts):
self.checkfileconflicts = checkfileconflicts
self.path = {} # dirname => {basename => (RpmPackage, index)}
def addPkg(self, pkg):
"""Add all files from RpmPackage pkg to self."""
path = self.path
basenames = pkg["basenames"]
if basenames != None:
dirindexes = pkg["dirindexes"]
dirnames = pkg["dirnames"]
for dirname in dirnames:
path.setdefault(dirname, {})
# python-only
dirnames = [ dirnames[di] for di in dirindexes ]
# python-only-end
# pyrex-code
#dirnames2 = []
#for di in dirindexes:
# dirnames2.append(dirnames[di])
#dirnames = dirnames2
# pyrex-code-end
else:
if pkg["oldfilenames"] == None:
return
# genBasenames2() is called for addPkg() and removePkg()
(basenames, dirnames) = genBasenames2(pkg["oldfilenames"])
for dirname in dirnames:
path.setdefault(dirname, {})
if self.checkfileconflicts:
for i in xrange(len(basenames)):
path[dirnames[i]].setdefault(basenames[i], []).append((pkg, i))
else:
for i in xrange(len(basenames)):
path[dirnames[i]].setdefault(basenames[i], []).append(pkg)
def removePkg(self, pkg):
"""Remove all files from RpmPackage pkg from self."""
basenames = pkg["basenames"]
if basenames != None:
dirindexes = pkg["dirindexes"]
dirnames = pkg["dirnames"]
# python-only
dirnames = [ dirnames[di] for di in dirindexes ]
# python-only-end
# pyrex-code
#dirnames2 = []
#for di in dirindexes:
# dirnames2.append(dirnames[di])
#dirnames = dirnames2
# pyrex-code-end
else:
if pkg["oldfilenames"] == None:
return
(basenames, dirnames) = genBasenames2(pkg["oldfilenames"])
if self.checkfileconflicts:
for i in xrange(len(basenames)):
self.path[dirnames[i]][basenames[i]].remove((pkg, i))
else:
for i in xrange(len(basenames)):
self.path[dirnames[i]][basenames[i]].remove(pkg)
def searchDependency(self, name, returnall=0):
"""Return list of packages providing file with name."""
(dirname, basename) = pathsplit2(name)
ret = self.path.get(dirname, {}).get(basename, [])
if self.checkfileconflicts and returnall == 0:
# python-only
return [ r[0] for r in ret ]
# python-only-end
# pyrex-code
#ret2 = []
#for r in ret:
# ret2.append(r[0])
#return ret2
# pyrex-code-end
return ret
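    # E.g. searchDependency("/bin/sh") returns the packages that contain
    # /bin/sh; with checkfileconflicts set and returnall=1 the raw
    # (RpmPackage, fileindex) tuples are returned instead.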
def searchStartsWith(self, name):
"""Return list of packages which have dir names starting with name."""
namelen = len(name)
rpms = []
for (dirname, basenames) in self.path.iteritems():
if dirname[:namelen] == name:
for (basename, x) in basenames.iteritems():
for (rpm, i) in x:
if rpm not in rpms:
rpms.append(rpm)
return rpms
# split EVR string in epoch, version and release
def evrSplit(evr):
epoch = "0"
i = evr.find(":")
if i != -1 and evr[:i].isdigit():
epoch = evr[:i]
j = evr.rfind("-", i + 1)
if j != -1:
return (epoch, evr[i + 1:j], evr[j + 1:])
return (epoch, evr[i + 1:], "")
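# Example: evrSplit("3:1.2-4") == ("3", "1.2", "4") and
# evrSplit("1.2") == ("0", "1.2", ""), the epoch defaulting to "0".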
flagmap2 = {
RPMSENSE_EQUAL: "=",
RPMSENSE_LESS: "<",
RPMSENSE_GREATER: ">",
RPMSENSE_EQUAL | RPMSENSE_LESS: "<=",
RPMSENSE_EQUAL | RPMSENSE_GREATER: ">="
}
def depString(name, flag, version):
if version == "":
return name
return "(%s %s %s)" % (name, flagmap2[flag & RPMSENSE_SENSEMASK], version)
def searchDependency(name, flag, version, mydeps):
deps = mydeps.get(name, [])
if not deps:
return []
if isinstance(version, basestring):
evr = evrSplit(version)
else:
evr = version
ret = []
for (f, v, rpm) in deps:
if rpm in ret:
continue
if version == "" or rangeCompare(flag, evr, f, evrSplit(v)):
ret.append(rpm)
elif v == "":
if rpm.strict:
print "Warning:", rpm.getFilename(), \
"should have a flag/version added for the provides", \
depString(name, flag, version)
ret.append(rpm)
return ret
class RpmResolver:
def __init__(self, rpms, checkfileconflicts):
self.rpms = []
self.requires_list = {}
self.filenames_list = FilenamesList(checkfileconflicts)
self.provides_list = {}
self.obsoletes_list = {}
self.conflicts_list = {}
for r in rpms:
if r["name"] != "gpg-pubkey":
self.addPkg(r)
def addPkg(self, pkg):
self.rpms.append(pkg)
pkg.addDeps("requirename", "requireflags", "requireversion",
self.requires_list)
if pkg.issrc:
return
self.filenames_list.addPkg(pkg)
pkg.addProvides(self.provides_list)
pkg.addDeps("obsoletename", "obsoleteflags", "obsoleteversion",
self.obsoletes_list)
pkg.addDeps("conflictname", "conflictflags", "conflictversion",
self.conflicts_list)
def removePkg(self, pkg):
self.rpms.remove(pkg)
pkg.removeDeps("requirename", "requireflags", "requireversion",
self.requires_list)
if pkg.issrc:
return
self.filenames_list.removePkg(pkg)
pkg.removeProvides(self.provides_list)
pkg.removeDeps("obsoletename", "obsoleteflags", "obsoleteversion",
self.obsoletes_list)
pkg.removeDeps("conflictname", "conflictflags", "conflictversion",
self.conflicts_list)
def searchDependency(self, name, flag, version):
s = searchDependency(name, flag, version, self.provides_list)
if name[0] == "/" and version == "":
s += self.filenames_list.searchDependency(name)
return s
OP_INSTALL = "install"
OP_UPDATE = "update"
OP_ERASE = "erase"
OP_FRESHEN = "freshen"
def operationFlag(flag, operation):
"""Return dependency flag for RPMSENSE_* flag during operation."""
if (isLegacyPreReq(flag) or
(operation == OP_ERASE and isErasePreReq(flag)) or
(operation != OP_ERASE and isInstallPreReq(flag))):
return 1
return 0
class RpmRelation:
"""Pre and post relations for a package (a node in the dependency
graph)."""
def __init__(self):
self.pre = {} # RpmPackage => flag
self.post = {} # RpmPackage => 1 (value is not used)
        self.weight = 0 # number of pkgs depending on this package
self.weight_edges = 0
def __str__(self):
return "%d %d" % (len(self.pre), len(self.post))
class RpmRelations(dict):
"""List of relations for each package (a dependency graph)."""
# RpmPackage => RpmRelation
def __init__(self, rpms):
        dict.__init__(self)
for pkg in rpms:
self[pkg] = RpmRelation()
def addRelation(self, pkg, pre, flag):
"""Add an arc from RpmPackage pre to RpmPackage pkg with flag.
pre can be None to add pkg to the graph with no arcs."""
i = self[pkg]
if flag or pre not in i.pre:
# prefer hard requirements, do not overwrite with soft req
i.pre[pre] = flag
self[pre].post[pkg] = 1
def remove(self, pkg):
"""Remove RpmPackage pkg from the dependency graph."""
rel = self[pkg]
# remove all post relations for the matching pre relation packages
for r in rel.pre:
del self[r].post[pkg]
# remove all pre relations for the matching post relation packages
for r in rel.post:
del self[r].pre[pkg]
del self[pkg]
def removeRelation(self, node, next):
"""Drop the "RpmPackage node requires RpmPackage next" arc."""
del self[node].pre[next]
del self[next].post[node]
def collect(self, pkg, order):
"""Move package from the relations graph to the order list
Handle ConnectedComponent."""
if isinstance(pkg, ConnectedComponent):
pkg.breakUp(order)
else:
order.append(pkg)
self.remove(pkg)
    def separatePostLeafNodes(self, list2):
        """Move topologically sorted "trailing" packages from
        orderer.RpmRelations relations and append them to list2.
        Stop when each remaining package has a successor (which implies
        a dependency loop)."""
        keys = self.keys()
        i = 0
        found = 0
        while keys:
            pkg = keys[i]
            if len(self[pkg].post) == 0:
                list2.append(pkg)
                self.remove(pkg)
                keys.pop(i)
                found = 1
            else:
                i += 1
            if i == len(keys):
                if found == 0:
                    break
                i = 0
                found = 0
def _calculateWeights2(self, pkg, leafs):
"""For each package generate a dict of all packages that depend on it.
At last use the length of the dict as weight."""
# Uncomment weight line in ConnectedComponent.__init__() to use this
if self[pkg].weight == 0:
weight = {pkg: pkg}
else:
weight = self[pkg].weight
for p in self[pkg].pre:
rel = self[p]
if rel.weight == 0:
rel.weight = weight.copy()
rel.weight[p] = p
else:
rel.weight.update(weight)
rel.weight_edges += 1
if rel.weight_edges == len(rel.post):
leafs.append(p)
if self[pkg].weight == 0:
self[pkg].weight = 1
else:
self[pkg].weight = len(weight)
def _calculateWeights(self, pkg, leafs):
"""Weight of a package is sum of the (weight+1) of all packages
depending on it."""
weight = self[pkg].weight + 1
for p in self[pkg].pre:
rel = self[p]
rel.weight += weight
rel.weight_edges += 1
if rel.weight_edges == len(rel.post):
leafs.append(p)
def calculateWeights(self):
leafs = []
for pkg in self:
if not self[pkg].post: # post leaf node
self._calculateWeights(pkg, leafs)
while leafs:
self._calculateWeights(leafs.pop(), leafs)
weights = {}
for pkg in self:
weights.setdefault(self[pkg].weight, []).append(pkg)
return weights
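    # Sketch of the weighting for a simple chain where A requires B and
    # B requires C: A (no dependents) keeps weight 0, B gets 0 + 1 = 1,
    # C gets 1 + 1 = 2, so calculateWeights() returns
    # {0: [A], 1: [B], 2: [C]}.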
def processLeafNodes(self, order, leaflist=None):
"""Move topologically sorted "trailing" packages from
orderer.RpmRelations relations to start of list."""
if leaflist is None:
leaflist = self # loop over all pkgs
# do a bucket sort
leafs = {} # len(post) -> [leaf pkgs]
for pkg in leaflist:
if not self[pkg].pre:
post = len(self[pkg].post)
leafs.setdefault(post, []).append(pkg)
if leafs:
max_post = max(leafs)
while leafs:
# remove leaf node
leaf = leafs[max_post].pop()
rels = self[leaf]
self.collect(leaf, order)
#self.config.printDebug(2, "%s" % (leaf.getNEVRA()))
# check post nodes if they got a leaf now
new_max = max_post
for pkg in rels.post:
if not self[pkg].pre:
post = len(self[pkg].post)
leafs.setdefault(post, []).append(pkg)
if post > new_max:
new_max = post
# select new (highest) bucket
if not leafs[max_post]:
del leafs[max_post]
if leafs:
max_post = max(leafs)
else:
max_post = new_max
def genOrder(self):
"""Order rpms in orderer.RpmRelations relations.
Return an ordered list of RpmPackage's on success, None on error."""
length = len(self)
order = []
connected_components = ConnectedComponentsDetector(self).detect(self)
#if connected_components:
        # self.config.printDebug(1, "-- STRONGLY CONNECTED COMPONENTS --")
# if self.config.debug > 1:
# for i in xrange(len(connected_components)):
# s = ", ".join([pkg.getNEVRA() for pkg in
# connected_components[i].pkgs])
# self.config.printDebug(2, " %d: %s" % (i, s))
self.processLeafNodes(order)
if len(order) != length:
print "%d Packages of %d in order list! Number of connected " \
"components: %d " % (len(order), length,
len(connected_components))
            raise ValueError, "not all packages could be ordered"
return order
class ConnectedComponent:
"""Contains a Strongly Connected Component (SCC).
This is a (maximal) set of nodes that are all reachable from
each other. In other words the component consists of loops touching
each other.
Automatically changes all relations of its pkgs from/to outside the
component to itself. After all components have been created the relations
graph is cycle free.
Mimics RpmPackage.
"""
def __init__(self, relations, pkgs):
"""relations: the RpmRelations object containing the loops."""
self.relations = relations
# add myself to the list
relations[self] = RpmRelation()
self.pkgs = {}
for pkg in pkgs:
self.pkgs[pkg] = pkg
relations[pkg].weight = -1
# remove all relations this connected component is replacing
for pkg in pkgs:
to_remove = []
for pre in relations[pkg].pre:
if not pre in self.pkgs:
to_remove.append(pre)
for p in to_remove:
flag = relations[pkg].pre[p]
relations.removeRelation(pkg, p)
relations.addRelation(self, p, flag)
to_remove = []
for post in relations[pkg].post:
if not post in self.pkgs:
to_remove.append(post)
for p in to_remove:
flag = relations[pkg].post[p]
relations.removeRelation(p, pkg)
relations.addRelation(p, self, flag)
relations[self].weight = len(self.pkgs)
# uncomment for use of the dict based weight algorithm
# relations[self].weight = self.pkgs.copy()
def __len__(self):
return len(self.pkgs)
def __str__(self):
return repr(self)
def getNEVRA(self):
# python-only
return "Component: " + \
",".join([ pkg.getNEVRA() for pkg in self.pkgs ])
# python-only-end
# pyrex-code
#ret = []
#for pkg in self.pkgs:
# ret.append(pkg.getNEVRA())
#return "Component: " + ",".join(ret)
# pyrex-code-end
def processLeafNodes(self, order):
"""Remove all leaf nodes with the component and append them to order.
"""
while 1:
# Without the requirement of max(rel.pre) this could be O(1)
next = None
next_post_len = -1
for pkg in self.pkgs:
if (len(self.relations[pkg].pre) == 0 and
len(self.relations[pkg].post) > next_post_len):
next = pkg
next_post_len = len(self.relations[pkg].post)
if next:
self.relations.collect(next, order)
del self.pkgs[next]
else:
return
def removeSubComponent(self, component):
"""Remove all packages of a sub component from own package list."""
for pkg in component.pkgs:
del self.pkgs[pkg]
def breakUp(self, order):
hard_requirements = []
for pkg in self.pkgs:
for (p, req) in self.relations[pkg].pre.iteritems():
if req:
hard_requirements.append((pkg, p))
# pick requirement to delete
weights = {}
# calculate minimal distance to a pre req
for (pkg, nextpkg) in hard_requirements:
# dijkstra
edge = [nextpkg]
weights[nextpkg] = 0
while edge:
node = edge.pop()
weight = weights[node] + 1
for (next_node, ishard) in self.relations[node].pre.iteritems():
if ishard:
continue
w = weights.get(next_node, None)
if w is not None and w < weight:
continue
weights[next_node] = weight
edge.append(next_node)
edge.sort()
edge.reverse()
if weights:
# get pkg with largest minimal distance
weight = -1
for (p, w) in weights.iteritems():
if w > weight:
(weight, pkg2) = (w, p)
            # get the predecessor with largest minimal distance
weight = -1
for p in self.relations[pkg2].post:
w = weights[p]
if w > weight:
(weight, pkg1) = (w, p)
else:
            # Search the relation whose removal will most likely set a
            # pkg free: relations that are the last post (pre) of the
            # start (end) pkg are good, and lots of pre/post on the side
            # where the relation is the last one make it even better.
            # Fewer relations rank higher, hence the negated values.
weight = None
for p1 in self.pkgs:
pre = len(self.relations[p1].pre)
post = len(self.relations[p1].post)
for p2 in self.relations[p1].pre.iterkeys():
pre2 = len(self.relations[p2].pre)
post2 = len(self.relations[p2].post)
if pre < post2: # start is more interesting
w = (-pre, post, -post2, pre)
elif pre > post2: # end is more interesting
w = (-post2, pre2, -pre, post2)
                    else: # == both same, add the numbers of pre and post
w = (-pre, post+pre2)
if w > weight:
# python handles comparison of tuples from left to
# right (like strings)
weight = w
(pkg1, pkg2) = (p1, p2)
if self.relations[pkg1].pre[pkg2]:
print "Breaking pre requirement for %s: %s" % (pkg1.getNEVRA(),
pkg2.getNEVRA())
# remove this requirement
self.relations.removeRelation(pkg1, pkg2)
# rebuild components
components = ConnectedComponentsDetector(self.relations
).detect(self.pkgs)
for component in components:
self.removeSubComponent(component)
self.pkgs[component] = component
# collect nodes
self.processLeafNodes(order)
class ConnectedComponentsDetector:
"""Use Gabow algorithm to detect strongly connected components:
Do a depth first traversal and number the nodes.
"root node": the node of a SCC that is visited first
Keep two stacks:
1. stack of all still possible root nodes
2. stack of all visited but still unknown nodes (pkg stack)
If we reach a unknown node just descent.
If we reach an unprocessed node it has a smaller number than the
node we came from and all nodes with higher numbers than this
node can be reach from it. So we must remove those nodes
from the root stack.
If we reach a node already processed (part of a SCC (of possibly
only one node size)) there is no way form this node to our current.
Just ignore this way.
If we go back in the recursion the following can happen:
1. Our node has been removed from the root stack. It is part of a
SCC -> do nothing
2. Our node is top on the root stack: the pkg stack contains a SCC
from the position of our node up -> remove it including our node
also remove the node from the root stack
"""
def __init__(self, relations):
self.relations = relations
def detect(self, pkgs):
"""Returns a list of all strongly ConnectedComponents."""
self.states = {} # attach numbers to packages
self.root_stack = [] # stack of possible root nodes
self.pkg_stack = [] # stack of all nodes visited and not processed yet
self.sccs = [] # already found strongly connected components
self.pkg_cnt = 0 # number of current package
# continue until all nodes have been visited
for pkg in pkgs:
if pkg not in self.states:
self._process(pkg)
# python-only
return [ ConnectedComponent(self.relations, pkgs) \
for pkgs in self.sccs ]
# python-only-end
# pyrex-code
#ret = []
#for pkgs in self.sccs:
# ret.append(ConnectedComponent(self.relations, pkgs))
#return ret
# pyrex-code-end
def _process(self, pkg):
"""Descent recursivly"""
states = self.states
root_stack = self.root_stack
pkg_stack = self.pkg_stack
self.pkg_cnt += 1
states[pkg] = self.pkg_cnt
# push pkg to both stacks
pkg_stack.append(pkg)
root_stack.append(pkg)
for next in self.relations[pkg].pre:
if next in states:
if states[next] > 0:
# if visited but not finished
# remove all pkgs with higher number from root stack
i = len(root_stack) - 1
while i >= 0 and states[root_stack[i]] > states[next]:
i -= 1
del root_stack[i + 1:]
else:
# visit
self._process(next)
# going up in the recursion
# if pkg is a root node (top on root stack)
if root_stack[-1] is pkg:
if pkg_stack[-1] is pkg:
# only one node SCC, drop it
pkg_stack.pop()
states[pkg] = 0 # set to "already processed"
else:
# get non trivial SCC from stack
idx = pkg_stack.index(pkg)
scc = pkg_stack[idx:]
del pkg_stack[idx:]
for p in scc:
states[p] = 0 # set to "already processed"
self.sccs.append(scc)
root_stack.pop()
class RpmOrderer:
def __init__(self, installs, updates, obsoletes, erases, resolver):
"""Initialize.
installs is a list of added RpmPackage's
erases a list of removed RpmPackage's (including updated/obsoleted)
updates is a hash: new RpmPackage => ["originally" installed RpmPackage
removed by update]
obsoletes is a hash: new RpmPackage => ["originally" installed
RpmPackage removed by update]
installs, updates and obsoletes can be None."""
self.installs = installs
self.updates = updates
# Only explicitly removed packages, not updated/obsoleted.
self.erases = erases
if self.updates:
for pkg in self.updates:
for p in self.updates[pkg]:
if p in self.erases:
self.erases.remove(p)
self.obsoletes = obsoletes
if self.obsoletes:
for pkg in self.obsoletes:
for p in self.obsoletes[pkg]:
if p in self.erases:
self.erases.remove(p)
self.resolver = resolver
def _genEraseOps(self, list2):
"""Return a list of (operation, RpmPackage) for erasing RpmPackage's
in list2."""
if len(list2) == 1:
return [(OP_ERASE, list2[0])]
return RpmOrderer({}, {}, {}, list2, self.resolver).order()
def genRelations(self, rpms, operation):
"""Return orderer.Relations between RpmPackage's in list rpms for
operation."""
resolver = self.resolver
relations = RpmRelations(rpms)
for ((n, f, v), rpms) in resolver.requires_list.iteritems():
if n[:7] in ("rpmlib(", "config("):
continue
resolved = resolver.searchDependency(n, f, v)
if resolved:
f2 = operationFlag(f, operation)
for pkg in rpms:
if pkg in resolved: # ignore deps resolved also by itself
continue
i = relations[pkg]
for pre in resolved:
# prefer hard requirements, do not overwrite with soft req
if f2 or pre not in i.pre:
i.pre[pre] = f2
relations[pre].post[pkg] = 1
return relations
def genOperations(self, order):
"""Return a list of (operation, RpmPackage) tuples from ordered list of
RpmPackage's order."""
operations = []
for r in order:
if r in self.erases:
operations.append((OP_ERASE, r))
else:
if self.updates and r in self.updates:
op = OP_UPDATE
else:
op = OP_INSTALL
operations.append((op, r))
if self.obsoletes and r in self.obsoletes:
operations.extend(self._genEraseOps(self.obsoletes[r]))
if self.updates and r in self.updates:
operations.extend(self._genEraseOps(self.updates[r]))
return operations
def order(self):
order = []
if self.installs:
relations = self.genRelations(self.installs, OP_INSTALL)
order2 = relations.genOrder()
if order2 == None:
return None
order.extend(order2)
if self.erases:
relations = self.genRelations(self.erases, OP_ERASE)
order2 = relations.genOrder()
if order2 == None:
return None
order2.reverse()
order.extend(order2)
return self.genOperations(order)
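# Usage sketch (illustrative; assumes a resolver built from the same
# packages, as done in checkDeps() further below):
#   orderer = RpmOrderer(installs, {}, {}, [], resolver)
#   operations = orderer.order()
# order() returns [(OP_INSTALL, pkg), ...] honoring dependency order,
# or None if no valid order could be generated.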
def selectNewestRpm(rpms, arch_hash, verbose):
"""Select one package out of rpms that has the highest version
number."""
newest = rpms[0]
newestarch = arch_hash.get(newest.getArch(), 999)
for rpm in rpms[1:]:
rpmarch = arch_hash.get(rpm.getArch(), 999)
if (rpmarch < newestarch or
(rpmarch == newestarch and pkgCompare(newest, rpm) < 0)):
if verbose > 4:
print "select", rpm.getFilename(), "over", newest.getFilename()
newest = rpm
newestarch = rpmarch
else:
if verbose > 4:
print "select", newest.getFilename(), "over", \
rpm.getFilename()
return newest
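# Example (illustrative): with arch_hash = {"i686": 1, "i386": 2,
# "noarch": 3}, an i686 rpm wins over an i386 rpm of the same name;
# arches missing from arch_hash score 999 and lose against any listed
# arch. pkgCompare() only breaks ties between rpms of equal arch score.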
def getPkgsNewest(rpms, arch=None, arch_hash={}, # pylint: disable-msg=W0102
verbose=0, exactarch=1, nosrc=0):
# Add all rpms by name,arch into a hash.
h = {}
for rpm in rpms:
if rpm.issrc:
if nosrc:
if verbose > 5:
print "Removed .src.rpm:", rpm.getFilename()
continue
rarch = "src"
else:
rarch = rpm["arch"]
if not exactarch:
rarch = buildarchtranslate.get(rarch, rarch)
h.setdefault( (rpm["name"], rarch) , []).append(rpm)
# For each arch select one newest rpm.
pkgs = []
for r in h.itervalues():
pkgs.append(selectNewestRpm(r, arch_hash, verbose))
if arch:
# Add all rpms into a hash by their name.
h = {}
for rpm in pkgs:
if rpm.issrc:
if verbose > 5:
print "Removed .src.rpm:", rpm.getFilename()
continue
# Remove all rpms not suitable for this arch.
if arch_hash.get(rpm["arch"]) == None:
if verbose > 4:
print "Removed due to incompatibel arch:", \
rpm.getFilename()
continue
h.setdefault(rpm["name"], []).append(rpm)
# By name find the newest rpm and then decide if a noarch
# rpm is the newest (and all others are deleted) or if an
# arch-dependent rpm is newest (and all noarchs are removed).
for rpms in h.itervalues():
# set verbose to 0 as this is actually not selecting rpms:
newest = selectNewestRpm(rpms, arch_hash, 0)
if newest["arch"] == "noarch":
for r in rpms:
if r != newest:
pkgs.remove(r)
if verbose > 4:
print "Removed older rpm:", r.getFilename()
else:
for r in rpms:
if r["arch"] == "noarch":
pkgs.remove(r)
if verbose > 4:
print "Removed older rpm:", r.getFilename()
return pkgs
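# Usage sketch (illustrative):
#   pkgs = getPkgsNewest(rpms, arch="i686",
#       arch_hash={"i686": 1, "i386": 2, "noarch": 3}, nosrc=1)
# First one newest rpm per (name, arch) is kept; then, if arch is set,
# noarch and arch-specific rpms of the same name are reduced to
# whichever variant is newest.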
def findRpms(dirname, uselstat=None, verbose=0):
s = os.stat
if uselstat:
s = os.lstat
dirs = [dirname]
files = []
while dirs:
d = dirs.pop()
for f in os.listdir(d):
path = "%s/%s" % (d, f)
st = s(path)
if S_ISREG(st.st_mode) and f[-4:] == ".rpm":
files.append(path)
elif S_ISDIR(st.st_mode):
dirs.append(path)
else:
if verbose > 2:
print "ignoring non-rpm", path
return files
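# Usage sketch (illustrative path):
#   rpmfiles = findRpms("/var/tmp/testrpms")
# Directories are walked iteratively; with uselstat set, os.lstat() is
# used, so symlinks count as neither regular file nor directory and
# are skipped.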
class RpmInfo:
def __init__(self, pkg):
if isinstance(pkg, ListType):
(self.filename, self.name, self.origepoch, self.version,
self.release, self.arch, self.sigdatasize, self.hdrdatasize,
self.pkgsize, self.sha1header) = pkg[:10]
self.sigdatasize = int(self.sigdatasize)
self.hdrdatasize = int(self.hdrdatasize)
self.pkgsize = int(self.pkgsize)
# python-only
self.deps = [ (pkg[i], int(pkg[i + 1]), pkg[i + 2]) \
for i in xrange(10, len(pkg), 3) ]
# python-only-end
# pyrex-code
#self.deps = []
#for i in xrange(10, len(pkg), 3):
# self.deps.append((pkg[i], int(pkg[i + 1]), pkg[i + 2]))
# pyrex-code-end
else: # if isinstance(pkg, ReadRpm):
self.filename = pkg.filename
self.name = pkg["name"]
self.origepoch = pkg.getEpoch("")
self.version = pkg["version"]
self.release = pkg["release"]
self.arch = pkg.getArch()
self.sigdatasize = pkg.sigdatasize
self.hdrdatasize = pkg.hdrdatasize
size_in_sig = pkg.sig.getOne("size_in_sig")
if size_in_sig != None:
self.pkgsize = 96 + pkg.sigdatasize + size_in_sig
elif not isUrl(self.filename):
self.pkgsize = os.stat(self.filename).st_size
else:
raise ValueError, "pkg has no size_in_sig"
self.sha1header = pkg.sig.get("sha1header", "")
self.deps = pkg.getObsoletes()
self.epoch = self.origepoch
if self.epoch == "":
self.epoch = "0"
def getCSV(self):
ret = [self.filename, self.name, self.epoch, self.version,
self.release, self.arch, str(self.sigdatasize),
str(self.hdrdatasize), str(self.pkgsize), self.sha1header]
for (n, f, v) in self.deps:
ret.extend([n, str(f), v])
return ret
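# A CSV record as produced by getCSV() (illustrative):
#   filename,name,epoch,version,release,arch,sigdatasize,hdrdatasize,
#   pkgsize,sha1header[,depname,depflags,depversion ...]
# i.e. 10 fixed fields followed by zero or more dependency triples,
# matching the pkg[:10] slicing in __init__ above.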
class RpmCSV:
def __init__(self, data=None):
self.pkglist = []
if isinstance(data, basestring):
self.pkglist = self.readCSV(data)
def readCSV(self, filename):
lines = open(filename, "r").readlines()
csv = []
crc = lines.pop()
if not crc.startswith("# crc: ") or not crc[-1] == "\n":
#print "crc not correct"
return None
crc = int(crc[7:-1])
crcval = zlib.crc32("")
for l in lines:
crcval = zlib.crc32(l, crcval)
if l[-1:] == "\n":
l = l[:-1]
entry = l.split(",")
if len(entry) < 10:
#print "csv: not enough entries"
return None
csv.append(RpmInfo(entry))
if crcval != crc:
#print "csv: crc did not match"
return None
return csv
def addPkg(self, pkg):
self.pkglist.append(RpmInfo(pkg))
def writeCSV(self, filename, check=1):
# python-only
csv = [ pkg.getCSV() for pkg in self.pkglist ]
# python-only-end
# pyrex-code
#csv = []
#for pkg in self.pkglist:
# csv.append(pkg.getCSV())
# pyrex-code-end
# Check if any value contains a wrong character.
if check:
for l in csv:
for item in l:
if "," in item:
return None
# Write new CSV file with crc checksum.
(fd, tmp) = mkstemp_file(pathdirname(filename))
crcval = zlib.crc32("")
for l in csv:
data = ",".join(l) + "\n"
crcval = zlib.crc32(data, crcval)
os.write(fd, data)
os.write(fd, "# crc: " + str(crcval) + "\n")
os.close(fd)
os.rename(tmp, filename)
return 1
def checkCSV():
pkgs = findRpms("/home/mirror/fedora/development/i386/Fedora/RPMS")
pkgs = readRpm(pkgs, rpmsigtag, rpmtag)
csv = RpmCSV()
for p in pkgs:
csv.addPkg(p)
if csv.writeCSV("/tmp/csv") == None:
print "Cannot write csv file."
csv2 = RpmCSV("/tmp/csv")
if csv2.pkglist == None:
print "Cannot read/parse csv file."
return
csv2.writeCSV("/tmp/csv2")
def cacheLocal(urls, filename, subdir, verbose, checksum=None,
checksumtype=None, nofilename=0):
import urlgrabber
try:
from M2Crypto.SSL.Checker import WrongHost
except ImportError:
WrongHost = None
for url in urls:
if not url:
continue
url = Uri2Filename(url).rstrip("/")
if nofilename == 0:
url += filename
if verbose > 4:
print "cacheLocal: looking at url:", url
if not url.startswith("http://") and not url.startswith("ftp://"):
return url
(dirname, basename) = pathsplit(filename)
localdir = cachedir + subdir + dirname
makeDirs(localdir)
localfile = "%s/%s" % (localdir, basename)
if checksum and getChecksum(localfile, checksumtype) == checksum:
return localfile
if verbose > 5:
print "cacheLocal: localfile:", localfile
try:
f = urlgrabber.urlgrab(url, localfile,
timeout=float(urloptions["timeout"]),
retry=int(urloptions["retries"]),
keepalive=int(urloptions["keepalive"]),
proxies=urloptions["proxies"],
http_headers=urloptions["http_headers"])
except (urlgrabber.grabber.URLGrabError, WrongHost), e:
if verbose > 4:
print "cacheLocal: error: e:", e
            # urlgrab fails with an invalid-range error for files that
            # were already completely transferred.
if type(e) == ListType and e[0] == 9:
if checksum and getChecksum(localfile, checksumtype) \
!= checksum:
continue
return localfile
continue
if verbose > 5:
print "cacheLocal: return:", f
if checksum and getChecksum(f, checksumtype) != checksum:
continue
return f
return None
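# Usage sketch (illustrative url; cachedir and urloptions are module
# globals set elsewhere in this file):
#   local = cacheLocal(["http://example.org/repo"], "/repodata/repomd.xml",
#       "myrepo/repo", verbose)
# Local (non-http/ftp) urls are returned directly; downloads are only
# accepted if they match the optional checksum.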
def buildPkgRefDict(pkgs):
"""Take a list of packages and return a dict that contains all the possible
naming conventions for them: name, name.arch, name-version-release.arch,
name-version, name-version-release, epoch:name-version-release."""
pkgdict = {}
for pkg in pkgs:
(n, e, v, r, a) = (pkg["name"], pkg.getEpoch(), pkg["version"],
pkg["release"], pkg.getArch())
na = "%s.%s" % (n, a)
nv = "%s-%s" % (n, v)
nvr = "%s-%s" % (nv, r)
nvra = "%s.%s" % (nvr, a)
envra = "%s:%s" % (e, nvra)
for item in (n, na, nv, nvr, nvra, envra):
pkgdict.setdefault(item, []).append(pkg)
return pkgdict
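# Example (illustrative): an rpm bash-3.1-1.i386 with epoch "0" is
# reachable under the keys "bash", "bash.i386", "bash-3.1",
# "bash-3.1-1", "bash-3.1-1.i386" and "0:bash-3.1-1.i386".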
__fnmatchre__ = re.compile(".*[\*\[\]\{\}\?].*")
def parsePackages(pkgs, requests):
"""Matches up the user request versus a pkg list. For installs/updates
available pkgs should be the 'others list' for removes it should be
the installed list of pkgs."""
matched = []
if requests:
pkgdict = buildPkgRefDict(pkgs)
for request in requests:
if request in pkgdict:
matched.extend(pkgdict[request])
elif __fnmatchre__.match(request):
import fnmatch
regex = re.compile(fnmatch.translate(request))
for item in pkgdict.iterkeys():
if regex.match(item):
matched.extend(pkgdict[item])
return matched
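# Usage sketch (illustrative): requests may mix exact names and
# fnmatch-style globs:
#   matched = parsePackages(repo.pkglist.values(), ["bash", "kernel*"])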
def escape(s):
"""Return escaped string converted to UTF-8. Return None if the string is
empty, so the newChild method does not add text node."""
if not s:
return None
s = s.replace("&", "&")
if isinstance(s, unicode):
return s
try:
x = unicode(s, "ascii")
return s
except UnicodeError:
encodings = ("utf-8", "iso-8859-1", "iso-8859-15", "iso-8859-2")
for enc in encodings:
try:
x = unicode(s, enc)
except UnicodeError:
pass
else:
if x.encode(enc) == s:
return x.encode("utf-8")
newstring = ""
for char in s:
if ord(char) > 127:
newstring = newstring + "?"
else:
newstring = newstring + char
return re.sub("\n$", "", newstring)
flagmap = {
None: None,
"EQ": RPMSENSE_EQUAL,
"LT": RPMSENSE_LESS,
"GT": RPMSENSE_GREATER,
"LE": RPMSENSE_EQUAL | RPMSENSE_LESS,
"GE": RPMSENSE_EQUAL | RPMSENSE_GREATER,
"": 0,
RPMSENSE_EQUAL: "EQ",
RPMSENSE_LESS: "LT",
RPMSENSE_GREATER: "GT",
RPMSENSE_EQUAL | RPMSENSE_LESS: "LE",
RPMSENSE_EQUAL | RPMSENSE_GREATER: "GE"
}
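# flagmap is deliberately bidirectional: comparison strings map to
# RPMSENSE_* bit combinations for parsing and the bit combinations map
# back to strings for XML generation, so one table serves both
# __parseDeps() and __generateDeps() below.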
# Files included in primary.xml.
filerc = re.compile("^(.*bin/.*|/etc/.*|/usr/lib/sendmail)$")
dirrc = re.compile("^(.*bin/.*|/etc/.*)$")
def utf8String(string):
"""hands back a unicoded string"""
if string == None:
return ""
if isinstance(string, unicode):
return string
try:
x = unicode(string, "ascii")
return string
except UnicodeError:
for enc in ("utf-8", "iso-8859-1", "iso-8859-15", "iso-8859-2"):
try:
x = unicode(string, enc)
except UnicodeError:
pass
else:
if x.encode(enc) == string:
return x.encode("utf-8")
newstring = ""
for char in string:
if ord(char) > 127:
newstring += "?"
else:
newstring += char
return newstring
def open_fh(filename):
if filename[-3:] == ".gz":
#return gzip.open(filename, "r")
return PyGZIP(filename, None, None, None)
return open(filename, "r")
def read_repodata(elem):
p = {}
p["type"] = elem.attrib.get("type")
for child in elem:
if child.tag == "{http://linux.duke.edu/metadata/repo}location":
p["location"] = child.attrib.get("href")
p["base"] = child.attrib.get("base")
elif child.tag == "{http://linux.duke.edu/metadata/repo}checksum":
p["checksum"] = child.text
p["checksum_type"] = child.attrib.get("type")
elif child.tag == "{http://linux.duke.edu/metadata/repo}open-checksum":
p["openchecksum"] = child.text
p["openchecksum_type"] = child.attrib.get("type")
elif child.tag == "{http://linux.duke.edu/metadata/repo}timestamp":
p["timestamp"] = child.text
return p
def read_repomd(filename):
fh = open(filename, "r")
if not fh:
return None
o = {}
for (_, elem) in iterparse(fh).__iter__():
if elem.tag == "{http://linux.duke.edu/metadata/repo}data":
p = read_repodata(elem)
o[p["type"]] = p
elem.clear()
return o
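# Usage sketch (illustrative path):
#   md = read_repomd("/tmp/repodata/repomd.xml")
#   if md and "primary" in md:
#       location = md["primary"]["location"]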
def _bn(qn):
return qn[qn.find("}") + 1:]
def _prefixprops(elem, prefix):
prefix += "_"
ret = {}
for (key, value) in elem.attrib.iteritems():
ret[prefix + _bn(key)] = value
return ret
def read_primary(filename, verbose=5):
fh = open_fh(filename)
if not fh:
return None
ret = {}
p = {}
files = {}
prco = {}
for (_, elem) in iterparse(fh).__iter__():
name = _bn(elem.tag)
if name in ("name", "arch", "summary", "description", "url",
"packager"):
p[name] = elem.text
elif name == "version": # epoch, ver, rel
p.update(elem.attrib)
elif name in ("time", "size"):
# time: file, build. size: package, installed, archive.
p.update(_prefixprops(elem, name))
elif name in ("checksum", "location"):
p.update(_prefixprops(elem, name))
p[name + "_value"] = elem.text
elif name == "metadata":
pass
elif name == "package":
p["file"] = files
ret[p["name"] + p["ver"] + "-" + p["rel"]] = p
p = {}
files = {}
elif name == "entry":
pass
elif name == "format":
pass
elif name == "file":
files[elem.text] = elem.get("type", "file")
elif name in ("license", "vendor", "group", "buildhost", "sourcerpm"):
p[name] = elem.text
elif name in ("provides", "requires", "conflicts", "obsoletes"):
prco[name] = [ c2.attrib for c2 in elem ]
elif name == "header-range":
p.update(_prefixprops(elem, "rpm_header"))
elif verbose > 4:
print "new primary tag:", name
#elem.clear()
return ret
def testRepo():
release = "/home/mirror/fedora/development/i386/os"
time1 = time.clock()
for _ in xrange(1000):
read_repomd(release + "/repodata/repomd.xml")
print time.clock() - time1, "milisec to read one repomd"
print read_repomd(release + "/repodata/repomd.xml")
time1 = time.clock()
for _ in xrange(5):
read_primary(release + "/repodata/primary.xml.gz")
print (time.clock() - time1) / 5.0, "sec to read primary"
print read_primary(release + "/repodata/primary.xml.gz")
def getProps(reader):
Namef = reader.Name
Valuef = reader.Value
MoveToNextAttributef = reader.MoveToNextAttribute
props = {}
while MoveToNextAttributef():
props[Namef()] = Valuef()
return props
class RpmRepo:
def __init__(self, filenames, excludes, verbose, reponame="default",
readsrc=0, fast=1):
self.filenames = filenames
self.filename = None
self.excludes = excludes.strip(" \t,;")
self.excludes = self.excludes.split(" \t,;")
self.verbose = verbose
self.reponame = reponame
self.readsrc = readsrc
self.filelist_imported = 0
self.checksum = "sha" # "sha" or "md5"
self.pretty = 1
self.pkglist = {}
self.groupfile = None
self.fast = fast
self.repomd = None
def read(self, onlyrepomd=0, readgroupfile=0):
for filename in self.filenames:
if not filename or filename[:1] == "#":
continue
if self.verbose > 2:
print "Reading yum repository %s." % filename
self.filename = filename
repomd = cacheLocal([filename], "/repodata/repomd.xml",
self.reponame + "/repo", self.verbose)
if not repomd:
continue
reader = libxml2.newTextReaderFilename(repomd)
if reader == None:
continue
self.repomd = self.__parseRepomd(reader)
if not self.repomd:
continue
if onlyrepomd:
return 1
repoprimary = self.repomd.get("primary", {})
pchecksum = repoprimary.get("checksum", "no")
pchecksumtype = repoprimary.get("checksum_type", "md5")
primary = cacheLocal([filename], "/repodata/primary.xml.gz",
self.reponame + "/repo", self.verbose, pchecksum,
pchecksumtype)
if not primary:
continue
reader = libxml2.newTextReaderFilename(primary)
if reader == None:
continue
self.__parsePrimary(reader)
self.__removeExcluded()
repogroupfile = self.repomd.get("group", {})
groupfile = repogroupfile.get("location")
if readgroupfile and groupfile:
gchecksum = repogroupfile.get("checksum", "no")
gchecksumtype = repogroupfile.get("checksum_type", "md5")
groupfile = cacheLocal([filename], "/" + groupfile,
self.reponame + "/repo", self.verbose, gchecksum,
gchecksumtype)
if not groupfile:
continue
self.groupfile = groupfile
# Now parse the groupfile?
return 1
return 0
def importFilelist(self):
if self.filelist_imported:
return 1
if self.verbose > 2:
print "Reading full filelist from %s." % self.filename
repofilelists = self.repomd.get("filelists", {})
fchecksum = repofilelists.get("checksum", "no")
fchecksumtype = repofilelists.get("checksum_type", "md5")
filelists = cacheLocal([self.filename], "/repodata/filelists.xml.gz",
self.reponame + "/repo", self.verbose, fchecksum, fchecksumtype)
if not filelists:
return 0
reader = libxml2.newTextReaderFilename(filelists)
if reader == None:
return 0
self.__parseFilelist(reader)
self.filelist_imported = 1
return 1
def createRepo(self, baseurl, ignoresymlinks, groupfile):
filename = Uri2Filename(self.filenames[0]).rstrip("/")
if self.verbose >= 2:
print "Creating yum metadata repository for dir %s:" % filename
rt = {}
for i in ("name", "epoch", "version", "release", "arch",
"requirename"):
value = rpmtag[i]
rt[i] = value
rt[value[0]] = value
filenames = findRpms(filename, ignoresymlinks)
filenames.sort()
if self.verbose >= 2:
print "Reading %d .rpm files:" % len(filenames)
printhash = PrintHash(len(filenames), 60)
i = 0
while i < len(filenames):
if self.verbose >= 2:
printhash.nextObject()
path = filenames[i]
pkg = ReadRpm(path)
if pkg.readHeader({}, rt):
print "Cannot read %s.\n" % path
continue
pkg.closeFd()
if self.excludes and self.__isExcluded(pkg):
filenames.pop(i)
continue
i += 1
if self.verbose >= 2:
printhash.nextObject(finish=1)
print "Writing repo data for %d .rpm files:" % len(filenames)
printhash = PrintHash(len(filenames), 60)
numpkg = len(filenames)
repodir = filename + "/repodata"
makeDirs(repodir)
(origpfd, pfdtmp) = mkstemp_file(repodir, special=1)
pfd = GzipFile(fileobj=origpfd, mode="wb")
if not pfd:
return 0
firstlinexml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
pfd.write(firstlinexml)
pfd.write("<metadata xmlns=\"http://linux.duke.edu/metadata/common\"" \
" xmlns:rpm=\"http://linux.duke.edu/metadata/rpm\" " \
"packages=\"%d\">\n" % numpkg)
(origffd, ffdtmp) = mkstemp_file(repodir, special=1)
ffd = GzipFile(fileobj=origffd, mode="wb")
if not ffd:
return 0
ffd.write(firstlinexml)
ffd.write("<filelists xmlns=\"http://linux.duke.edu/metadata/" \
"filelists\" packages=\"%d\">\n" % numpkg)
(origofd, ofdtmp) = mkstemp_file(repodir, special=1)
ofd = GzipFile(fileobj=origofd, mode="wb")
if not ofd:
return 0
ofd.write(firstlinexml)
ofd.write("<otherdata xmlns=\"http://linux.duke.edu/metadata/other\"" \
" packages=\"%s\">\n" % numpkg)
pdoc = libxml2.newDoc("1.0")
proot = pdoc.newChild(None, "metadata", None)
basens = proot.newNs("http://linux.duke.edu/metadata/common", None)
formatns = proot.newNs("http://linux.duke.edu/metadata/rpm", "rpm")
proot.setNs(basens)
fdoc = libxml2.newDoc("1.0")
froot = fdoc.newChild(None, "filelists", None)
filesns = froot.newNs("http://linux.duke.edu/metadata/filelists", None)
froot.setNs(filesns)
odoc = libxml2.newDoc("1.0")
oroot = odoc.newChild(None, "otherdata", None)
otherns = oroot.newNs("http://linux.duke.edu/metadata/other", None)
oroot.setNs(otherns)
for path in filenames:
if self.verbose >= 2:
printhash.nextObject()
pkg = ReadRpm(path)
if pkg.readHeader(rpmsigtag, rpmtag):
print "Cannot read %s.\n" % path
continue
pkg["yumlocation"] = path[len(filename) + 1:]
pkg["yumchecksum"] = getChecksum(pkg.filename, self.checksum)
self.__writePrimary(pfd, proot, pkg, formatns)
self.__writeFilelists(ffd, froot, pkg)
self.__writeOther(ofd, oroot, pkg)
pfd.write("</metadata>\n")
ffd.write("</filelists>\n")
ofd.write("</otherdata>\n")
# python-only
del pfd, ffd, ofd
# python-only-end
origpfd.close()
origffd.close()
origofd.close()
repodoc = libxml2.newDoc("1.0")
reporoot = repodoc.newChild(None, "repomd", None)
repons = reporoot.newNs("http://linux.duke.edu/metadata/repo", None)
reporoot.setNs(repons)
workfiles = [(ofdtmp, 1, "other"), (ffdtmp, 1, "filelists"),
(pfdtmp, 1, "primary")]
ngroupfile = repodir + "/" + groupfile
if os.path.exists(ngroupfile):
workfiles.append((ngroupfile, 0, "group"))
for (ffile, gzfile, ftype) in workfiles:
if gzfile:
zfo = PyGZIP(ffile, None, None, None)
uncsum = getChecksum(zfo, self.checksum)
timestamp = os.stat(ffile).st_mtime
csum = getChecksum(ffile, self.checksum)
data = reporoot.newChild(None, "data", None)
data.newProp("type", ftype)
location = data.newChild(None, "location", None)
if baseurl != None:
location.newProp("xml:base", baseurl)
if gzfile:
location.newProp("href", "repodata/" + ftype + ".xml.gz")
else:
location.newProp("href", "repodata/" + groupfile)
checksum = data.newChild(None, "checksum", csum)
checksum.newProp("type", self.checksum)
timestamp = data.newChild(None, "timestamp", str(timestamp))
if gzfile:
unchecksum = data.newChild(None, "open-checksum", uncsum)
unchecksum.newProp("type", self.checksum)
os.rename(pfdtmp, repodir + "/primary.xml.gz")
os.rename(ffdtmp, repodir + "/filelists.xml.gz")
os.rename(ofdtmp, repodir + "/other.xml.gz")
repodoc.saveFormatFileEnc(repodir + "/repomd.xml", "UTF-8", 1)
if self.verbose >= 2:
printhash.nextObject(finish=1)
return 1
def __parseRepomd(self, reader):
"""Parse repomd.xml for sha1 checks of the files. Returns a hash of
the form: name -> {location, checksum, timestamp, open-checksum}."""
rethash = {}
        # Bind heavily used methods to local variables to speed up this loop.
Readf = reader.Read
NodeTypef = reader.NodeType
Namef = reader.Name
tmphash = {}
fname = None
if Readf() != 1 or NodeTypef() != TYPE_ELEMENT or Namef() != "repomd":
return rethash
while Readf() == 1:
ntype = NodeTypef()
if ntype == TYPE_END_ELEMENT:
if Namef() == "repomd":
break
continue
if ntype != TYPE_ELEMENT:
continue
name = Namef()
if name == "data":
props = getProps(reader)
fname = props.get("type")
if not fname:
break
tmphash = {}
rethash[fname] = tmphash
elif name == "location":
props = getProps(reader)
loc = props.get("href")
if loc:
tmphash["location"] = loc
elif name == "checksum" or name == "open-checksum":
props = getProps(reader)
ptype = props.get("type")
if ptype not in ("sha", "md5"):
print "Unsupported checksum type %s in repomd.xml for" \
" file %s" % (ptype, fname)
continue
tmphash[name + "_type"] = ptype
if Readf() != 1:
break
tmphash[name] = reader.Value()
elif name == "timestamp":
if Readf() != 1:
break
tmphash["timestamp"] = reader.Value()
elif name == "database_version":
if Readf() != 1:
break
tmphash["database_version"] = reader.Value()
elif self.verbose > 4:
print "new repomd entry: %s" % name
return rethash
def __parsePrimary(self, reader):
Readf = reader.Read
NodeTypef = reader.NodeType
Namef = reader.Name
while Readf() == 1:
if NodeTypef() == TYPE_ELEMENT and Namef() == "package":
props = getProps(reader)
if props.get("type") == "rpm":
pkg = self.__parsePackage(reader)
if self.readsrc or pkg["arch"] != "src":
self.pkglist[pkg.getNEVRA0()] = pkg
    def delDebuginfo(self):
        # Iterate over a copy of the keys: deleting entries while
        # iterating over iteritems() would raise a RuntimeError.
        for nevra in self.pkglist.keys():
            pkg = self.pkglist[nevra]
            # or should we search for "-debuginfo" only?
            if (pkg["name"].endswith("-debuginfo") or
                    pkg["name"] == "glibc-debuginfo-common"):
                del self.pkglist[nevra]
def __removeExcluded(self):
for pkg in parsePackages(self.pkglist.values(), self.excludes):
nevra = pkg.getNEVRA0()
if nevra in self.pkglist:
del self.pkglist[nevra]
def __isExcluded(self, pkg):
return len(parsePackages([pkg, ], self.excludes)) != 0
def __writeVersion(self, pkg_node, pkg):
tnode = pkg_node.newChild(None, "version", None)
tnode.newProp("epoch", pkg.getEpoch())
tnode.newProp("ver", pkg["version"])
tnode.newProp("rel", pkg["release"])
def __writePrimary(self, fd, parent, pkg, formatns):
pkg_node = parent.newChild(None, "package", None)
pkg_node.newProp("type", "rpm")
pkg_node.newChild(None, "name", pkg["name"])
pkg_node.newChild(None, "arch", pkg.getArch())
self.__writeVersion(pkg_node, pkg)
tnode = pkg_node.newChild(None, "checksum", pkg["yumchecksum"])
tnode.newProp("type", self.checksum)
tnode.newProp("pkgid", "YES")
pkg_node.newChild(None, "summary", escape(pkg["summary"][0]))
pkg_node.newChild(None, "description", escape(pkg["description"][0]))
pkg_node.newChild(None, "packager", escape(pkg["packager"]))
pkg_node.newChild(None, "url", escape(pkg["url"]))
tnode = pkg_node.newChild(None, "time", None)
st = os.stat(pkg.filename)
tnode.newProp("file", str(st.st_mtime))
tnode.newProp("build", str(pkg["buildtime"][0]))
tnode = pkg_node.newChild(None, "size", None)
# st.st_size == 96 + pkg.sigdatasize + pkg.sig.getOne("size_in_sig")
tnode.newProp("package", str(st.st_size))
tnode.newProp("installed", str(pkg["size"][0]))
archivesize = pkg.hdr.getOne("archivesize")
if archivesize == None:
archivesize = pkg.sig.getOne("payloadsize")
tnode.newProp("archive", str(archivesize))
tnode = pkg_node.newChild(None, "location", None)
tnode.newProp("href", pkg["yumlocation"])
fnode = pkg_node.newChild(None, "format", None)
self.__generateFormat(fnode, pkg, formatns)
output = pkg_node.serialize("UTF-8", self.pretty)
fd.write(output + "\n")
pkg_node.unlinkNode()
pkg_node.freeNode()
def __writePkgInfo(self, parent, pkg):
pkg_node = parent.newChild(None, "package", None)
pkg_node.newProp("pkgid", pkg["yumchecksum"])
pkg_node.newProp("name", pkg["name"])
pkg_node.newProp("arch", pkg.getArch())
self.__writeVersion(pkg_node, pkg)
return pkg_node
def __writeFilelists(self, fd, parent, pkg):
pkg_node = self.__writePkgInfo(parent, pkg)
self.__generateFilelist(pkg_node, pkg, 0)
output = pkg_node.serialize("UTF-8", self.pretty)
fd.write(output + "\n")
pkg_node.unlinkNode()
pkg_node.freeNode()
def __writeOther(self, fd, parent, pkg):
pkg_node = self.__writePkgInfo(parent, pkg)
if pkg["changelogname"] != None:
for (name, ctime, text) in zip(pkg["changelogname"],
pkg["changelogtime"], pkg["changelogtext"]):
clog = pkg_node.newChild(None, "changelog", None)
clog.addContent(utf8String(text))
clog.newProp("author", utf8String(name))
clog.newProp("date", str(ctime))
output = pkg_node.serialize("UTF-8", self.pretty)
fd.write(output + "\n")
pkg_node.unlinkNode()
pkg_node.freeNode()
def __parsePackage(self, reader):
Readf = reader.Read
NodeTypef = reader.NodeType
Namef = reader.Name
pkg = ReadRpm("repopkg")
pkg.sig = HdrIndex()
pkg.hdr = HdrIndex()
pkg.setHdr()
pkg.sig["size_in_sig"] = [0, ]
while Readf() == 1:
ntype = NodeTypef()
if ntype == TYPE_END_ELEMENT:
if Namef() == "package":
break
continue
if ntype != TYPE_ELEMENT:
continue
name = Namef()
if name == "name":
Readf()
pkg["name"] = reader.Value()
elif name == "arch":
Readf()
pkg["arch"] = reader.Value()
if pkg["arch"] == "src":
pkg.issrc = 1
else:
pkg["sourcerpm"] = ""
elif name == "version":
props = getProps(reader)
pkg["version"] = props["ver"]
pkg["release"] = props["rel"]
pkg["epoch"] = [int(props["epoch"]), ]
elif name == "location":
props = getProps(reader)
pkg.filename = self.filename + "/" + props["href"]
elif name == "format":
self.__parseFormat(reader, pkg)
elif self.fast == 0:
if name == "checksum":
props = getProps(reader)
if props["type"] == "md5":
Readf()
pkg.sig["md5"] = reader.Value()
elif props["type"] == "sha":
Readf()
pkg.sig["sha1header"] = reader.Value()
elif self.verbose > 4:
print "unknown checksum type"
elif name == "size":
props = getProps(reader)
pkg.sig["size_in_sig"][0] += int(props.get("package", "0"))
elif self.verbose > 4 and name not in ("summary",
"description", "packager", "url", "time"):
print "new package entry: %s" % name
return pkg
def __parseFilelist(self, reader):
        # Bind heavily used methods to local variables to speed up this loop.
Readf = reader.Read
NodeTypef = reader.NodeType
Namef = reader.Name
Valuef = reader.Value
filelist = []
while Readf() == 1:
if NodeTypef() != TYPE_ELEMENT or Namef() != "package":
continue
props = getProps(reader)
pname = props.get("name", "no-name")
arch = props.get("arch", "no-arch")
(epoch, version, release) = ("", "", "")
while Readf() == 1:
ntype = NodeTypef()
if ntype == TYPE_ELEMENT:
name = Namef()
if name == "file":
Readf()
filelist.append(Valuef())
elif name == "version":
props = getProps(reader)
epoch = props["epoch"]
version = props["ver"]
release = props["rel"]
elif self.verbose > 4:
print "new filelist: %s" % name
elif ntype == TYPE_END_ELEMENT:
if Namef() == "package":
break
continue
nevra = "%s-%s:%s-%s.%s" % (pname, epoch, version, release, arch)
if nevra in self.pkglist:
pkg = self.pkglist[nevra]
pkg["oldfilenames"] = filelist
#(pkg["basenames"], pkg["dirindexes"], pkg["dirnames"]) = \
# genBasenames(filelist)
def __generateFormat(self, node, pkg, formatns):
node.newChild(formatns, "license", escape(pkg["license"]))
node.newChild(formatns, "vendor", escape(pkg["vendor"]))
node.newChild(formatns, "group", escape(pkg["group"][0]))
node.newChild(formatns, "buildhost", escape(pkg["buildhost"]))
node.newChild(formatns, "sourcerpm", escape(pkg["sourcerpm"]))
tnode = node.newChild(formatns, "header-range", None)
start = 96 + pkg.sigdatasize
end = start + pkg.hdrdatasize
tnode.newProp("start", str(start))
tnode.newProp("end", str(end))
provides = pkg.getProvides()
if len(provides) > 0:
self.__generateDeps(node, "provides", provides, formatns)
conflicts = pkg.getConflicts()
if len(conflicts) > 0:
self.__generateDeps(node, "conflicts", conflicts, formatns)
obsoletes = pkg.getObsoletes()
if len(obsoletes) > 0:
self.__generateDeps(node, "obsoletes", obsoletes, formatns)
requires = pkg.getRequires()
if len(requires) > 0:
self.__generateDeps(node, "requires", requires, formatns)
self.__generateFilelist(node, pkg)
def __generateDeps(self, node, name, deps, formatns):
dnode = node.newChild(formatns, name, None)
deps = self.__filterDuplicateDeps(deps)
for dep in deps:
enode = dnode.newChild(formatns, "entry", None)
enode.newProp("name", dep[0])
if dep[1] != "":
if (dep[1] & RPMSENSE_SENSEMASK) != 0:
enode.newProp("flags", flagmap[dep[1] & RPMSENSE_SENSEMASK])
if dep[2] != "":
(e, v, r) = evrSplit(dep[2])
enode.newProp("epoch", e)
enode.newProp("ver", v)
if r != "":
enode.newProp("rel", r)
if dep[1] != "" and name == "requires":
#if isLegacyPreReq(dep[1]) or isInstallPreReq(dep[1]):
if (dep[1] & RPMSENSE_PREREQ) != 0:
enode.newProp("pre", "1")
def __generateFilelist(self, node, pkg, filter2=1):
files = pkg.getFilenames()
fileflags = pkg["fileflags"]
filemodes = pkg["filemodes"]
if files == None or fileflags == None or filemodes == None:
return
(writefile, writedir, writeghost) = ([], [], [])
for (fname, mode, flag) in zip(files, filemodes, fileflags):
if S_ISDIR(mode):
if not filter2 or dirrc.match(fname):
writedir.append(fname)
elif not filter2 or filerc.match(fname):
if flag & RPMFILE_GHOST:
writeghost.append(fname)
else:
writefile.append(fname)
writefile.sort()
for f in writefile:
tnode = node.newChild(None, "file", escape(f))
writedir.sort()
for f in writedir:
tnode = node.newChild(None, "file", escape(f))
tnode.newProp("type", "dir")
writeghost.sort()
for f in writeghost:
tnode = node.newChild(None, "file", escape(f))
tnode.newProp("type", "ghost")
def __parseFormat(self, reader, pkg):
filelist = []
while reader.Read() == 1:
ntype = reader.NodeType()
if ntype == TYPE_END_ELEMENT:
if reader.Name() == "format":
break
continue
if ntype != TYPE_ELEMENT:
continue
name = reader.Name()
if name == "rpm:header-range":
props = getProps(reader)
header_start = int(props.get("start", "0"))
header_end = int(props.get("end", "0"))
pkg.sig["size_in_sig"][0] -= header_start
pkg["rpm:header-range:end"] = header_end
elif self.fast == 0:
if name == "rpm:sourcerpm":
reader.Read()
pkg["sourcerpm"] = reader.Value()
elif name == "rpm:provides":
(pkg["providename"], pkg["provideflags"],
pkg["provideversion"]) = self.__parseDeps(reader, name)
elif name == "rpm:requires":
(pkg["requirename"], pkg["requireflags"],
pkg["requireversion"]) = self.__parseDeps(reader, name)
elif name == "rpm:obsoletes":
(pkg["obsoletename"], pkg["obsoleteflags"],
pkg["obsoleteversion"]) = self.__parseDeps(reader, name)
elif name == "rpm:conflicts":
(pkg["conflictname"], pkg["conflictflags"],
pkg["conflictversion"]) = self.__parseDeps(reader, name)
elif name == "file":
reader.Read()
filelist.append(reader.Value())
elif self.verbose > 4 and name not in ("rpm:vendor",
"rpm:buildhost", "rpm:group", "rpm:license"):
print "new repo entry: %s" % name
pkg["oldfilenames"] = filelist
#(pkg["basenames"], pkg["dirindexes"], pkg["dirnames"]) = \
# genBasenames(filelist)
def __filterDuplicateDeps(self, deps):
fdeps = []
for (name, flags, version) in deps:
flags &= RPMSENSE_SENSEMASK | RPMSENSE_PREREQ
if (name, flags, version) not in fdeps:
fdeps.append((name, flags, version))
fdeps.sort()
return fdeps
def __parseDeps(self, reader, ename):
Readf = reader.Read
NodeTypef = reader.NodeType
Namef = reader.Name
plist = ([], [], [])
while Readf() == 1:
ntype = NodeTypef()
if ntype == TYPE_END_ELEMENT:
if Namef() == ename:
break
continue
if ntype != TYPE_ELEMENT:
continue
if Namef() == "rpm:entry":
props = getProps(reader)
name = props["name"]
flags = flagmap[props.get("flags", "")]
if "pre" in props:
flags |= RPMSENSE_PREREQ
epoch = ""
if "epoch" in props:
epoch = props["epoch"] + ":"
ver = props.get("ver", "")
rel = ""
if "rel" in props:
rel = "-" + props["rel"]
plist[0].append(name)
plist[1].append(flags)
plist[2].append("%s%s%s" % (epoch, ver, rel))
return plist
def parseBoolean(s):
lower = s.lower()
if lower in ("yes", "true", "1", "on"):
return 1
return 0
class RpmCompsXML:
def __init__(self, filename):
self.filename = filename
self.grouphash = {}
self.grouphierarchyhash = {}
def printErr(self, err):
print "%s: %s" % (self.filename, err)
def __str__(self):
return str(self.grouphash)
def read(self, filename):
doc = libxml2.parseFile(filename)
if doc == None:
return 0
root = doc.getRootElement()
if root == None:
return 0
node = root.children
while node != None:
if node.type != "element":
node = node.next
continue
if node.name == "group" or node.name == "category":
ret = self.__parseGroup(node.children)
if not ret:
return 0
elif node.name == "grouphierarchy":
# We don't need grouphierarchies, so don't parse them ;)
#ret = self.__parseGroupHierarchy(node.children)
#if not ret:
# return 0
ret = 1
else:
self.printErr("Unknown entry in comps.xml: %s" % node.name)
return 0
node = node.next
return 0
def getPackageNames(self, group):
ret = self.__getPackageNames(group, ("mandatory", "default"))
ret2 = []
for r in ret:
ret2.append(r[0])
ret2.extend(r[1])
return ret2
def getOptionalPackageNames(self, group):
return self.__getPackageNames(group, ("optional",))
def getDefaultPackageNames(self, group):
return self.__getPackageNames(group, ("default",))
def getMandatoryPackageNames(self, group):
return self.__getPackageNames(group, ("mandatory",))
def __getPackageNames(self, group, typelist):
ret = []
if not self.grouphash.has_key(group):
return ret
if self.grouphash[group].has_key("packagelist"):
pkglist = self.grouphash[group]["packagelist"]
for pkgname in pkglist:
for t in typelist:
if pkglist[pkgname][0] == t:
ret.append((pkgname, pkglist[pkgname][1]))
if self.grouphash[group].has_key("grouplist"):
grplist = self.grouphash[group]["grouplist"]
for grpname in grplist["groupreqs"]:
ret.extend(self.__getPackageNames(grpname, typelist))
for grpname in grplist["metapkgs"]:
ret.extend(self.__getPackageNames(grpname, typelist))
        # Sort and remove duplicates.
ret.sort()
for i in xrange(len(ret) - 2, -1, -1):
if ret[i + 1] == ret[i]:
ret.pop(i + 1)
return ret
def __parseGroup(self, node):
group = {}
while node != None:
if node.type != "element":
node = node.next
continue
if node.name == "name":
lang = node.prop("lang")
if lang:
group["name:" + lang] = node.content
else:
group["name"] = node.content
elif node.name == "id":
group["id"] = node.content
elif node.name == "description":
lang = node.prop("lang")
if lang:
group["description:" + lang] = node.content
else:
group["description"] = node.content
elif node.name == "default":
group["default"] = parseBoolean(node.content)
elif node.name == "langonly":
group["langonly"] = node.content
elif node.name == "packagelist":
group["packagelist"] = self.__parsePackageList(node.children)
elif node.name == "grouplist":
group["grouplist"] = self.__parseGroupList(node.children)
node = node.next
self.grouphash[group["id"]] = group
return 1
def __parsePackageList(self, node):
plist = {}
while node != None:
if node.type != "element":
node = node.next
continue
if node.name == "packagereq":
ptype = node.prop("type")
if ptype == None:
ptype = "default"
requires = node.prop("requires")
if requires != None:
requires = requires.split()
else:
requires = []
plist[node.content] = (ptype, requires)
node = node.next
return plist
def __parseGroupList(self, node):
glist = {}
glist["groupreqs"] = []
glist["metapkgs"] = {}
while node != None:
if node.type != "element":
node = node.next
continue
if node.name == "groupreq" or node.name == "groupid":
glist["groupreqs"].append(node.content)
elif node.name == "metapkg":
gtype = node.prop("type")
if gtype == None:
gtype = "default"
glist["metapkgs"][node.content] = gtype
node = node.next
return glist
def getVars(releasever, arch, basearch):
replacevars = {}
replacevars["$releasever"] = releasever
replacevars["$RELEASEVER"] = releasever
replacevars["$arch"] = arch
replacevars["$ARCH"] = arch
replacevars["$basearch"] = basearch
replacevars["$BASEARCH"] = basearch
for i in xrange(10):
key = "YUM%d" % i
value = os.environ.get(key)
if value != None:
replacevars[key.lower()] = value
replacevars[key] = value
return replacevars
def replaceVars(line, data):
for (key, value) in data.iteritems():
line = line.replace(key, value)
return line
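# Example (illustrative):
#   replaceVars("core-$releasever/$basearch", getVars("6", "i686", "i386"))
# yields "core-6/i386".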
MainVarnames = ("cachedir", "keepcache", "reposdir", "debuglevel",
"errorlevel", "logfile", "gpgcheck", "assumeyes", "alwaysprompt",
"tolerant", "exclude", "exactarch", "installonlypkgs",
"kernelpkgnames", "showdupesfromrepos", "obsoletes",
"overwrite_groups", "enable_group_conditionals", "installroot",
"rss-filename", "distroverpkg",
"diskspacecheck", "tsflags", "recent", "retries", "keepalive",
"timeout", "http_caching", "throttle", "bandwidth", "commands",
"proxy", "proxy_username", "proxy_password", "pkgpolicy",
"plugins", "pluginpath", "pluginconfpath", "metadata_expire",
"mirrorlist_expire")
RepoVarnames = ("name", "baseurl", "mirrorlist", "enabled", "gpgcheck",
"gpgkey", "exclude", "includepkgs", "enablegroups", "failovermethod",
"keepalive", "timeout", "http_caching", "retries", "throttle",
"bandwidth", "metadata_expire", "proxy", "proxy_username",
"proxy_password", "mirrorlist_expire")
def YumConf(verbose, buildroot="", filename="/etc/yum.conf",
reposdirs=[]): # pylint: disable-msg=W0102
import glob
data = {}
ret = YumConf2(filename, verbose, data)
if ret != None:
raise ValueError, "could not read line %d in %s" % (ret, filename)
k = data.get("main", {}).get("reposdir")
if k != None:
reposdirs = k.split(" \t,;")
for reposdir in reposdirs:
for filename in glob.glob(buildroot + reposdir + "/*.repo"):
ret = YumConf2(filename, verbose, data)
if ret != None:
raise ValueError, "could not read line %d in %s" % (ret,
filename)
return data
def YumConf2(filename, verbose, data):
lines = []
if os.path.isfile(filename) and os.access(filename, os.R_OK):
if verbose > 2:
print "Reading in config file %s." % filename
lines = open(filename, "r").readlines()
stanza = "main"
prevcommand = None
for linenum in xrange(len(lines)):
line = lines[linenum].rstrip("\n\r")
if line[:1] == "[" and line.find("]") != -1:
stanza = line[1:line.find("]")]
prevcommand = None
elif prevcommand and line[:1] in " \t":
# continuation line
line = line.strip()
if line and line[:1] not in "#;":
data[stanza][prevcommand].append(line)
else:
line = line.strip()
if line[:1] in "#;" or not line:
pass # comment line
elif line.find("=") != -1:
(key, value) = line.split("=", 1)
(key, value) = (key.strip(), value.strip())
if stanza == "main":
if key not in MainVarnames:
return linenum + 1 # unknown key value
elif key not in RepoVarnames:
return linenum + 1 # unknown key value
prevcommand = None
if key in ("baseurl", "mirrorlist"):
value = [value]
prevcommand = key
data.setdefault(stanza, {})[key] = value
else:
return linenum + 1 # not parsable line
return None
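# A minimal config accepted by YumConf2 (illustrative):
#   [main]
#   reposdir=/etc/yum.repos.d
#   [updates]
#   name=Updates
#   baseurl=http://example.org/updates/$basearch
# "baseurl" and "mirrorlist" values are stored as lists so indented
# continuation lines can append further urls.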
def readYumConf(configfiles, reposdirs, verbose, buildroot, rpmdbpath,
distroverpkg, releasever):
yumconfs = []
for c in configfiles:
yumconfs.append(YumConf(verbose, buildroot, c, reposdirs))
if yumconfs and yumconfs[0].get("main", {}).get("distroverpkg") != None:
distroverpkg = yumconfs[0].get("main", {}).get("distroverpkg")
distroverpkg = distroverpkg.split(",")
if yumconfs and not releasever:
releasever = readReleaseVer(distroverpkg, buildroot, rpmdbpath)
return (yumconfs, distroverpkg, releasever)
def readMirrorlist(mirrorlist, replacevars, key, verbose):
baseurls = []
for mlist in mirrorlist:
mlist = replaceVars(mlist, replacevars)
if verbose > 2:
print "Getting mirrorlist from %s." % mlist
fname = cacheLocal([mlist], "mirrorlist", key,
verbose, nofilename=1)
if not fname:
continue
for l in open(fname).readlines():
l = l.strip()
l = l.replace("$ARCH", "$basearch")
if l and l[0] != "#":
baseurls.append(l)
return baseurls
def readRepos(yumconfs, releasever, arch, readdebug,
readsrc, verbose, readgroupfile=0, fast=1):
global urloptions # pylint: disable-msg=W0603
basearch = buildarchtranslate.get(arch, arch)
repos = []
for yumconf in yumconfs:
for key in yumconf.iterkeys():
if key == "main":
continue
sec = yumconf[key]
if sec.get("enabled") == "0":
continue
urloptions = setOptions(yumconf, key)
baseurls = sec.get("baseurl", [])
replacevars = getVars(releasever, arch, basearch)
excludes = yumconf.get("main", {}).get("exclude", "")
excludes += " " + sec.get("exclude", "")
excludes = replaceVars(excludes, replacevars)
# If we have mirrorlist grab it, read it and add the extended
# lines to our baseurls, just like yum does.
if "mirrorlist" in sec:
mirrorlist = sec["mirrorlist"]
baseurls.extend(readMirrorlist(mirrorlist, replacevars, key,
verbose))
if not baseurls:
print "%s:" % key, "No url for this section in conf file."
urloptions = setOptions()
return None
for i in xrange(len(baseurls)):
baseurls[i] = replaceVars(baseurls[i], replacevars)
repo = RpmRepo(baseurls, excludes, verbose, key, readsrc, fast)
if repo.read(readgroupfile=readgroupfile) == 0:
print "Cannot read repo %s." % key
urloptions = setOptions()
return None
if not readdebug:
repo.delDebuginfo()
repos.append(repo)
urloptions = setOptions()
return repos
def testMirrors(verbose, args):
    # By default we are more verbose:
verbose += 3
urloptions["timeout"] = "20.0"
if args:
# python-only
args = [ (a, "5", "i686", "i386") for a in args ]
# python-only-end
# pyrex-code
#args2 = []
#for a in args:
# args2.append((a, "5", "i686", "i386"))
#args = args2
# pyrex-code-end
else:
ml = "http://mirrors.fedoraproject.org/mirrorlist?"
ml = ml + "arch=$basearch&country=global&repo="
args = [
# FC-releases
(ml + "core-$releasever", "4", "i686", "i386"),
(ml + "core-debug-$releasever", "4", "i686", "i386"),
(ml + "core-source-$releasever", "4", "i686", "i386"),
(ml + "core-$releasever", "5", "i686", "i386"),
(ml + "core-debug-$releasever", "5", "i686", "i386"),
(ml + "core-source-$releasever", "5", "i686", "i386"),
(ml + "core-$releasever", "6", "i686", "i386"),
(ml + "core-debug-$releasever", "6", "i686", "i386"),
(ml + "core-source-$releasever", "6", "i686", "i386"),
(ml + "fedora-$releasever", "7", "i686", "i386"),
(ml + "fedora-debug-$releasever", "7", "i686", "i386"),
(ml + "fedora-source-$releasever", "7", "i686", "i386"),
(ml + "rawhide", "8", "i686", "i386"),
(ml + "rawhide-debug", "8", "i686", "i386"),
(ml + "rawhide-source", "8", "i686", "i386"),
# FC-updates
(ml + "updates-released-fc$releasever", "4", "i686", "i386"),
(ml + "updates-released-debug-fc$releasever", "4", "i686", "i386"),
(ml + "updates-released-source-fc$releasever", "4", "i686","i386"),
(ml + "updates-released-fc$releasever", "5", "i686", "i386"),
(ml + "updates-released-debug-fc$releasever", "5", "i686", "i386"),
(ml + "updates-released-source-fc$releasever", "5", "i686","i386"),
(ml + "updates-released-fc$releasever", "6", "i686", "i386"),
(ml + "updates-released-debug-fc$releasever", "6", "i686", "i386"),
(ml + "updates-released-source-fc$releasever", "6", "i686","i386"),
(ml + "updates-released-f$releasever", "7", "i686", "i386"),
(ml + "updates-released-debug-f$releasever", "7", "i686", "i386"),
(ml + "updates-released-source-f$releasever", "7", "i686","i386"),
# FC-updates-testing
(ml + "updates-testing-fc$releasever", "4", "i686", "i386"),
(ml + "updates-testing-debug-fc$releasever", "4", "i686", "i386"),
(ml + "updates-testing-source-fc$releasever", "4", "i686", "i386"),
(ml + "updates-testing-fc$releasever", "5", "i686", "i386"),
(ml + "updates-testing-debug-fc$releasever", "5", "i686", "i386"),
(ml + "updates-testing-source-fc$releasever", "5", "i686", "i386"),
(ml + "updates-testing-fc$releasever", "6", "i686", "i386"),
(ml + "updates-testing-debug-fc$releasever", "6", "i686", "i386"),
(ml + "updates-testing-source-fc$releasever", "6", "i686", "i386"),
(ml + "updates-testing-f$releasever", "7", "i686", "i386"),
(ml + "updates-testing-debug-f$releasever", "7", "i686", "i386"),
(ml + "updates-testing-source-f$releasever", "7", "i686", "i386"),
# Fedora Extras
(ml + "extras-$releasever", "4", "i686", "i386"),
(ml + "extras-$releasever", "5", "i686", "i386"),
(ml + "extras-debug-$releasever", "5", "i686", "i386"),
(ml + "extras-source-$releasever", "5", "i686", "i386"),
(ml + "extras-$releasever", "6", "i686", "i386"),
(ml + "extras-debug-$releasever", "6", "i686", "i386"),
(ml + "extras-source-$releasever", "6", "i686", "i386"),
]
for (mirrorlist, releasever, arch, basearch) in args:
print "---------------------------------------"
replacevars = getVars(releasever, arch, basearch)
m = readMirrorlist([mirrorlist], replacevars, "testmirrors", verbose)
#print m
if verbose > 2:
for reponame in m:
reponame = replaceVars(reponame, replacevars)
repo = RpmRepo([reponame], "", verbose, "testmirrors", 1, 1)
if repo.read(1) == 0:
print "failed"
else:
try:
print time.strftime("%Y/%m/%d", \
time.gmtime(int(repo.repomd["primary"]["timestamp"])))
except:
print "FAILED"
def writeFile(filename, data, mode=None):
(fd, tmpfile) = mkstemp_file(pathdirname(filename), special=1)
fd.write("".join(data))
if mode != None:
os.chmod(tmpfile, mode & 07777)
os.rename(tmpfile, filename)
rootdir = "/home/devel/test"
hgfiles = rootdir + "/filecache"
grepodir = rootdir + "/git-data"
srepodir = rootdir + "/unpacked"
mirror = "/var/www/html/mirror/"
if not os.path.isdir(mirror):
mirror = "/home/mirror/"
fedora = mirror + "fedora/"
rhelupdates = mirror + "updates-rhel/"
srpm_repos = [
# Fedora Core
("Fedora Core development", "FC-development",
[fedora + "development/source/SRPMS"], None),
#("Fedora Core 4", "FC4",
# [fedora + "4/SRPMS", fedora + "updates/4/SRPMS",
# fedora + "updates/testing/4/SRPMS"], None),
#("Fedora Core 3", "FC3",
# [fedora + "3/SRPMS", fedora + "updates/3/SRPMS",
# fedora + "updates/testing/3/SRPMS"], None),
#("Fedora Core 2", "FC2",
# [fedora + "2/SRPMS", fedora + "updates/2/SRPMS",
# fedora + "updates/testing/2/SRPMS"], None),
#("fedora Core 1", "FC1",
# [fedora + "1/SRPMS", fedora + "updates/1/SRPMS",
# fedora + "updates/testing/1/SRPMS"], None),
# Red Hat Enterprise Linux
("Red Hat Enterprise Linux 4", "RHEL4",
[mirror + "rhel/4/en/os/i386/SRPMS", rhelupdates + "4"], None),
("Red Hat Enterprise Linux 3", "RHEL3",
[mirror + "rhel/3/en/os/i386/SRPMS", rhelupdates + "3"], None),
("Red Hat Enterprise Linux 2.1", "RHEL2.1",
[mirror + "rhel/2.1AS/en/os/i386/SRPMS", rhelupdates + "2.1"], None),
]
def getChangeLogFromRpm(pkg, oldpkg):
"""Try to list the changelog data from pkg which is newer if compared
to oldpkg."""
# This only works if an old pkg is available to compare against:
if not oldpkg or not oldpkg["changelogtime"]:
return (-1, None)
# See if the end of the changelog data is the same, then we just
# list the newer entries:
oldlength = len(oldpkg["changelogtime"])
if (pkg["changelogtime"][- oldlength:] == oldpkg["changelogtime"] and
pkg["changelogname"][- oldlength:] == oldpkg["changelogname"] and
pkg["changelogtext"][- oldlength:] == oldpkg["changelogtext"]):
return (len(pkg["changelogtime"]) - oldlength, None)
# Return the time of the first changelog entry in oldpkg:
return (-1, oldpkg["changelogtime"][0])
def cmpNoMD5(a, b):
"""Ignore leading md5sum to sort the "sources" file."""
return cmp(a[33:], b[33:])
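# "sources" lines have the form "<32 hex digit md5> <filename>\n", so
# skipping 33 characters (checksum plus separating space) sorts the
# file by filename instead of by checksum.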
def extractSrpm(pkg, pkgdir, filecache, repodir, oldpkg):
pkgname = pkg["name"]
files = pkg.getFilenames()
i = pkg.getSpecfile(files)
specfile = files[i]
fullspecfile = "%s/%s" % (pkgdir, specfile)
(changelognum, changelogtime) = getChangeLogFromRpm(pkg, oldpkg)
if os.path.exists(fullspecfile): # os.access(fullspecfile, os.R_OK)
checksum = getChecksum(fullspecfile)
# same spec file in repo and in rpm: nothing to do
if checksum == pkg["filemd5s"][i]:
return
# If we don't have the previous package anymore, but there is still
# a specfile, read the time of the last changelog entry.
if changelognum == -1 and changelogtime == None:
l = open(fullspecfile, "r").readlines()
while l:
if l[0] == "%changelog\n":
l.pop(0)
break
l.pop(0)
if l:
l = l[0].split()
if l and l[0] == "*" and len(l) >= 5:
try:
import calendar
changelogtime = time.strptime(" ".join(l[1:5]),
"%a %b %d %Y")
changelogtime = calendar.timegm(changelogtime)
except:
pass
os.system('rm -rf "%s"' % pkgdir)
makeDirs(pkgdir)
extractRpm(pkg, pkgdir + "/")
for f in os.listdir(pkgdir):
if f not in files and f not in ("Makefile", "sources"):
fsrc = pkgdir + "/" + f
os.unlink(fsrc)
os.system("cd %s/.. && GIT_DIR=%s git-update-index "
"--remove %s/%s" % (pkgdir, repodir, pkgname, f))
if "sources" in files or "Makefile" in files:
raise ValueError, \
"src.rpm contains sources/Makefile: %s" % pkg.filename
EXTRACT_SOURCE_FOR = ["MAKEDEV", "anaconda", "anaconda-help",
"anaconda-product", "basesystem", "booty", "chkconfig",
"device-mapper", "dmraid", "firstboot", "glibc-kernheaders", "hwdata",
"initscripts", "kudzu", "mkinitrd",
"pam_krb5", "passwd", "redhat-config-kickstart",
"redhat-config-netboot", "redhat-config-network",
"redhat-config-securitylevel", "redhat-logos", "redhat-release",
"rhn-applet", "rhnlib", "rhpl",
"sysklogd", "system-config-securitylevel",
"tux", "udev"]
if repodir.endswith("/RHEL2.1.git"):
EXTRACT_SOURCE_FOR.remove("redhat-config-network")
sources = []
if filecache:
for i in xrange(len(files)):
f = files[i]
if not S_ISREG(pkg["filemodes"][i]) or not isBinary(f):
continue
fsrc = pkgdir + "/" + f
# should we use sha instead of md5:
#md5data = getChecksum(fsrc, "sha")
md5data = pkg["filemd5s"][i]
fdir = "%s/%s" % (filecache, md5data[0:2])
fname = "%s/%s.bin" % (fdir, md5data)
if not os.path.exists(fname):
makeDirs(fdir)
doLnOrCopy(fsrc, fname)
if pkg["name"] in EXTRACT_SOURCE_FOR:
if fsrc.find(".tar") >= 0:
tempdir = "%s/e.tar" % pkgdir
os.mkdir(tempdir)
dirname = explodeFile(fsrc, tempdir, "0")
os.rename(dirname, "%s/tar" % pkgdir)
os.rmdir(tempdir)
os.unlink(fsrc)
sources.append("%s %s\n" % (md5data, f))
sources.sort(cmpNoMD5)
writeFile(pkgdir + "/sources", sources)
writeFile(pkgdir + "/Makefile", [
"include ../pyrpm/Makefile.srpm\n",
"NAME:=%s\nSPECFILE:=%s\n" % (pkg["name"], specfile)])
os.environ["GIT_DIR"] = repodir
os.system("cd %s/.. && { find %s -type f -print | xargs git-update-index "
"-q --add --refresh; }" % (pkgdir, pkgname))
os.system('cd %s/.. && { for file in $(git-ls-files); do [ ! -f "$file" ]'
' && git-update-index --remove "$file"; done; }' % pkgdir)
del os.environ["GIT_DIR"]
# Add changelog text:
(fd, tmpfile) = mkstemp_file(tmpdir, special=1)
fd.write("update to %s" % pkg.getNVR())
if oldpkg:
fd.write(" (from %s-%s)" % (oldpkg["version"], oldpkg["release"]))
if changelognum != -1 or changelogtime != None:
fd.write("\n" + pkg.getChangeLog(changelognum, changelogtime))
fd.write("\n")
fd.close()
# python-only
del fd
# python-only-end
changelog = "-F " + tmpfile
# Add a user name and email:
user = "cvs@devel.redhat.com"
email = user
if pkg["changelogname"]:
user = pkg["changelogname"][0]
if user.rfind("> ") != -1:
user = user[:user.rfind("> ") + 1]
email = user
if email.find("<") != -1:
email = email[email.find("<") + 1:email.rfind(">") + 1]
if user.rfind(" <") != -1:
user = user[:user.rfind(" <")]
# XXX if we monitor trees, we could change the checkin time to
# first day of release of the rpm package instead of rpm buildtime
buildtime = str(pkg.hdr.getOne("buildtime"))
os.system("cd " + repodir + " && GIT_AUTHOR_NAME=\"" + user + \
"\" GIT_AUTHOR_EMAIL=\"" + email + "\" GIT_AUTHOR_DATE=" + \
buildtime + " GIT_COMMITTER_NAME=\"" + user + \
"\" GIT_COMMITTER_EMAIL=\"" + email + "\" GIT_COMMITTER_DATE=" + \
buildtime + " GIT_DIR=" + repodir + " git commit " + changelog)
if tmpfile != None:
os.unlink(tmpfile)
def cmpByTime(a, b):
return cmp(a["buildtime"][0], b["buildtime"][0])
def createMercurial(verbose):
if not os.path.isdir(grepodir) or not os.path.isdir(hgfiles):
print "Error: Paths for mercurial not setup. " + grepodir \
+ " " + hgfiles
return
# Create and initialize repos if still missing.
for (repodescr, reponame, dirs, filecache) in srpm_repos:
repodir = grepodir + "/" + reponame + ".git"
unpackdir = srepodir + "/" + reponame
if not dirs or not os.path.isdir(dirs[0]):
continue
if verbose > 2:
print repodescr
if os.path.isdir(repodir):
firsttime = 0
else:
firsttime = 1
makeDirs(repodir)
os.system("cd %s && { GIT_DIR=%s git init-db; }" % \
(repodir, repodir))
writeFile(repodir + "/description", [repodescr + "\n"])
if not filecache:
filecache = hgfiles + "/" + reponame
makeDirs(unpackdir)
makeDirs(filecache)
pkgs = []
for d in dirs:
pkgs.extend(findRpms(d))
pkgs = readRpm(pkgs, rpmsigtag, rpmtag)
if firsttime:
pkgs.sort(cmpByTime)
else:
pkgs = getPkgsNewest(pkgs)
oldpkgs = {}
for pkg in pkgs:
pkgname = pkg["name"]
pkgdir = unpackdir + "/" + pkgname
extractSrpm(pkg, pkgdir, filecache, repodir, oldpkgs.get(pkgname))
oldpkgs[pkgname] = pkg
os.system("cd %s && { GIT_DIR=%s git repack -d; GIT_DIR=%s git "
"prune-packed; }" % (unpackdir, repodir, repodir))
def checkDeps(rpms, checkfileconflicts, runorderer, verbose=0):
    # Calling .sort() below takes a small amount of time, but gives a
    # deterministic order and sorted error output, which is easier to read.
# Add all packages in.
if verbose > 3:
time1 = time.clock()
resolver = RpmResolver(rpms, checkfileconflicts)
if verbose > 3:
time2 = time.clock()
print "- Needed", time2 - time1, "sec for RpmResolver()."
time1 = time.clock()
# Check for obsoletes.
deps = resolver.obsoletes_list.keys()
deps.sort()
for (name, flag, version) in deps:
orpms = resolver.obsoletes_list[(name, flag, version)]
for pkg in resolver.searchDependency(name, flag, version):
for rpm in orpms:
if rpm.getNEVR0() == pkg.getNEVR0():
continue
if rpm["name"] == name or not pkg in resolver.rpms:
continue
print "Warning:", pkg.getFilename(), "is obsoleted by", \
rpm.getFilename()
resolver.removePkg(pkg)
# Check all conflicts.
conflicts = []
deps = resolver.conflicts_list.keys()
deps.sort()
for (name, flag, version) in deps:
orpms = resolver.conflicts_list[(name, flag, version)]
for pkg in resolver.searchDependency(name, flag, version):
for rpm in orpms:
if rpm.getNEVR0() == pkg.getNEVR0():
continue
print "Warning:", rpm.getFilename(), \
"contains a conflict with", pkg.getFilename()
conflicts.append((rpm, pkg))
conflicts.append((pkg, rpm))
# Check all requires.
deps = resolver.requires_list.keys()
deps.sort()
for (name, flag, version) in deps:
if name[:7] == "rpmlib(":
continue
if not resolver.searchDependency(name, flag, version):
for rpm in resolver.requires_list[(name, flag, version)]:
print "Warning:", rpm.getFilename(), \
"did not find a package for:", \
depString(name, flag, version)
if verbose > 3:
time2 = time.clock()
print "- Needed", time2 - time1, "sec for conflicts/requires/obsoletes."
time1 = time.clock()
# Check for fileconflicts.
if checkfileconflicts:
dirnames = resolver.filenames_list.path.keys()
dirnames.sort()
# First check for directory names which use symlinks as part
# of their filenames:
alldirnames = {}
for dirname in dirnames:
# XXX: compare the following to resolveLink():
path = ""
for elem in dirname.split(os.sep)[1:]:
path = "%s%s%s" % (path, os.sep, elem)
alldirnames[path] = None
alldirnames = alldirnames.keys()
alldirnames.sort()
for dirname in alldirnames:
for (rpm, i) in resolver.filenames_list.searchDependency(dirname, 1):
if S_ISLNK(rpm["filemodes"][i]):
print "symlink", dirname, "from", rpm.getFilename(),
print "is used as directory name in",
for y in resolver.filenames_list.searchStartsWith(dirname):
print y.getFilename(),
print
if verbose > 3:
time2 = time.clock()
print "- Needed", time2 - time1, "sec to check for symlinks with dirnames."
time1 = time.clock()
# Now check for other fileconflicts:
for dirname in dirnames:
pathdirname2 = resolver.filenames_list.path[dirname]
basenames = pathdirname2.keys()
basenames.sort()
for basename in basenames:
s = pathdirname2[basename]
if len(s) < 2:
continue
                # We could also check only against the next entry and
                # then report one error per filename with all rpms listed.
for j in xrange(len(s) - 1):
(rpm1, i1) = s[j]
filemodesi1 = rpm1["filemodes"][i1]
filemd5si1 = rpm1["filemd5s"][i1]
filecolorsi1 = None
if rpm1["filecolors"]:
filecolorsi1 = rpm1["filecolors"][i1]
for k in xrange(j + 1, len(s)):
(rpm2, i2) = s[k]
filemodesi2 = rpm2["filemodes"][i2]
# No fileconflict if mode/md5sum/user/group match.
# In addition also symlink linktos need to match.
if (filemd5si1 == rpm2["filemd5s"][i2] and
filemodesi1 == filemodesi2
and rpm1["fileusername"][i1] ==
rpm2["fileusername"][i2]
and rpm1["filegroupname"][i1] ==
rpm2["filegroupname"][i2]
and (not S_ISLNK(filemodesi1) or
rpm1["filelinktos"][i1] ==
rpm2["filelinktos"][i2])):
continue
# No fileconflict for multilib elf32/elf64 files,
# both files need to be elf32 or elf64 files.
if filecolorsi1 and rpm2["filecolors"]:
filecolorsi2 = rpm2["filecolors"][i2]
if filecolorsi2 and filecolorsi1 != filecolorsi2:
continue
# Mention which fileconflicts also have a real
# Conflicts: dependency within the packages:
kn = ""
if (rpm1, rpm2) in conflicts:
kn = "(known)"
print "fileconflict for", dirname + basename, "in", \
rpm1.getFilename(), "and", rpm2.getFilename(), kn
if verbose > 3:
time2 = time.clock()
print "- Needed", time2 - time1, "sec for fileconflicts."
time1 = time.clock()
# Order rpms on how they get installed.
if runorderer:
orderer = RpmOrderer(resolver.rpms, {}, {}, [], resolver)
operations = orderer.order()
if operations == None:
            raise ValueError("rpm ordering failed")
if verbose > 3:
time2 = time.clock()
print "- Needed", time2 - time1, "sec for rpm ordering."
time1 = time.clock()
#print operations
def checkRepo(rpms):
"""Check if all src.rpms are included and does each -devel rpm have
a corresponding normal rpm of the same arch."""
f = {}
h = {}
srcrpms = {}
for rpm in rpms:
h[(rpm["name"], rpm.getArch())] = 1
if rpm.issrc:
srcrpms[rpm.getFilename()] = 0
f.setdefault(rpm["name"], []).append(rpm)
for rpm in rpms:
if rpm.issrc:
continue
if rpm["name"].endswith("-devel"):
if not h.get((rpm["name"][:-6], rpm.getArch())):
print rpm.getFilename(), "only has a -devel subrpm"
if srcrpms.get(rpm["sourcerpm"]) == None:
print rpm.getFilename(), "does not have a src.rpm", rpm["sourcerpm"]
else:
srcrpms[rpm["sourcerpm"]] += 1
for (rpm, value) in srcrpms.iteritems():
if value == 0:
print rpm, "only has a src.rpm"
for (name, rpms) in f.iteritems():
if len(rpms) > 1:
print name, "has more than one src.rpm with the same name"
def verifyStructure(verbose, packages, phash, tag, useidx=1):
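    """Cross-check one rpmdb index file against Packages.

    packages maps transaction ids (tids) to package headers, phash maps
    tid to {index: value} entries read from an index db file, tag names
    the rpm header tag to compare, and useidx=0 marks single-valued tags
    like "name" where the whole tag is compared instead of one index.
    """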
# Verify that all data is also present in /var/lib/rpm/Packages.
for (tid, mytag) in phash.iteritems():
if tid not in packages:
print "Error %s: Package id %s doesn't exist" % (tag, tid)
if verbose > 2:
print tag, mytag
continue
if tag == "dirindexes" and packages[tid]["dirindexes2"] != None:
pkgtag = packages[tid]["dirindexes2"]
elif tag == "dirnames" and packages[tid]["dirnames2"] != None:
pkgtag = packages[tid]["dirnames2"]
elif tag == "basenames" and packages[tid]["basenames2"] != None:
pkgtag = packages[tid]["basenames2"]
else:
pkgtag = packages[tid][tag]
for (idx, mytagidx) in mytag.iteritems():
if useidx:
try:
val = pkgtag[idx]
except IndexError:
print "Error %s: index %s is not in package" % (tag, idx)
if verbose > 2:
print mytagidx
else:
if idx != 0:
print "Error %s: index %s out of range" % (tag, idx)
val = pkgtag
if mytagidx != val:
print "Error %s: %s != %s in package %s" % (tag, mytagidx,
val, packages[tid].getFilename())
# Go through /var/lib/rpm/Packages and check if data is correctly
# copied over to the other files.
for (tid, pkg) in packages.iteritems():
if tag == "dirindexes" and pkg["dirindexes2"] != None:
refhash = pkg["dirindexes2"]
elif tag == "dirnames" and pkg["dirnames2"] != None:
refhash = pkg["dirnames2"]
elif tag == "basenames" and pkg["basenames2"] != None:
refhash = pkg["basenames2"]
else:
refhash = pkg[tag]
if not refhash:
continue
phashtid = None
if tid in phash:
phashtid = phash[tid]
if not useidx:
# Single entry with data:
if phashtid != None and refhash != phashtid[0]:
print "wrong data in packages for", pkg["name"], tid, tag
elif phashtid == None:
print "no data in packages for", pkg["name"], tid, tag
if verbose > 2:
print "refhash:", refhash
continue
tnamehash = {}
for idx in xrange(len(refhash)):
key = refhash[idx]
# Only one group entry is copied over.
if tag == "group" and idx > 0:
continue
# requirename only stored if not InstallPreReq
if tag == "requirename":
if isInstallPreReq(pkg["requireflags"][idx]):
continue
            # Only include filemd5s for regular files (and ignore files
            # with size 0, as broken kernels can then generate rpm
            # packages with missing md5sums for size==0 files).
if tag == "filemd5s" and (not S_ISREG(pkg["filemodes"][idx]) or
(key == "" and pkg["filesizes"][idx] == 0)):
continue
# We only need to store triggernames once per package.
if tag == "triggername":
if key in tnamehash:
continue
tnamehash[key] = 1
# Real check for the actual data:
try:
if phashtid[idx] != key:
print "wrong data"
except IndexError:
print "Error %s: index %s is not in package %s" % (tag,
idx, tid)
if verbose > 2:
print key, phashtid
def readPackages(buildroot, rpmdbpath, verbose, keepdata=1, hdrtags=None):
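    """Read the bsddb hash file <buildroot><rpmdbpath>Packages.

    Returns (packages, keyring, maxtid, pkgdata, swapendian): packages
    maps tid to ReadRpm headers, maxtid is stored under tid 0, pkgdata
    keeps the raw header blobs (if keepdata is set) and swapendian is
    "<" or ">" if the rpmdb was written with the opposite byte order.
    """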
import bsddb
if hdrtags == None:
hdrtags = rpmdbtag
packages = {}
pkgdata = {}
keyring = None #openpgp.PGPKeyRing()
maxtid = 0
# Read the db4/hash file to determine byte order / endianness
# as well as maybe host order:
swapendian = ""
data = open(buildroot + rpmdbpath + "Packages", "rb").read(16)
if len(data) == 16:
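        # 0x00061561 is the Berkeley DB hash magic number; if it unpacks
        # correctly in native byte order, no endian swapping is needed.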
if unpack("=I", data[12:16])[0] == 0x00061561:
if verbose > 4:
print "Checking rpmdb with same endian order."
else:
#PY3: if pack("=H", 0xdead) == b"\xde\xad":
if pack("=H", 0xdead) == "\xde\xad":
swapendian = "<"
if verbose:
print "Big-endian machine reading little-endian rpmdb."
else:
swapendian = ">"
if verbose:
print "Little-endian machine reading big-endian rpmdb."
db = bsddb.hashopen(buildroot + rpmdbpath + "Packages", "r")
try:
(tid, data) = db.first()
except:
return (packages, keyring, maxtid, pkgdata, swapendian)
while 1:
tid = unpack("%sI" % swapendian, tid)[0]
if tid == 0:
maxtid = unpack("%sI" % swapendian, data)[0]
else:
fd = StringIO(data)
pkg = ReadRpm("rpmdb", fd=fd)
pkg.readHeader(None, hdrtags, keepdata, 1)
if pkg["name"] == "gpg-pubkey":
#for k in openpgp.parsePGPKeys(pkg["description"]):
# keyring.addKey(k)
pkg["group"] = (pkg["group"],)
packages[tid] = pkg
if keepdata:
pkgdata[tid] = data
try:
(tid, data) = db.next()
except:
break
return (packages, keyring, maxtid, pkgdata, swapendian)
def readDb(swapendian, filename, dbtype="hash", dotid=None):
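    """Read one rpmdb index file (bsddb hash or btree).

    Each db value is a list of packed 8-byte (tid, idx) pairs; the
    result maps tid -> {idx: key}. With dotid set the keys themselves
    are unpacked as integers (used for the Installtid file).
    """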
import bsddb
if dbtype == "hash":
db = bsddb.hashopen(filename, "r")
else:
db = bsddb.btopen(filename, "r")
rethash = {}
try:
(k, v) = db.first()
except:
return rethash
while 1:
if dotid:
k = unpack("%sI" % swapendian, k)[0]
#PY3: if k == b"\x00":
if k == "\x00":
k = ""
for i in xrange(0, len(v), 8):
(tid, idx) = unpack("%s2I" % swapendian, v[i:i + 8])
rethash.setdefault(tid, {})
if idx in rethash[tid]:
print "ignoring duplicate idx: %s %d %d" % (k, tid, idx)
continue
rethash[tid][idx] = k
try:
(k, v) = db.next()
except:
break
return rethash
def diffFmt(fmt1, fmt2, fmt3, fmt4):
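    """Print where two rpm header index/store regions differ.

    fmt1/fmt2 are the index regions (16 bytes per entry: tag, type,
    offset, count as big-endian ints), fmt3/fmt4 the data stores.
    """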
print "diff between rpmdb header and new header:"
if fmt1 != fmt2:
print "fmt1/fmt2 differ"
if len(fmt1) != len(fmt2):
print "length: fmt1:", len(fmt1), "fmt2:", len(fmt2)
# So this does not output additional entries beyond min():
l = min(len(fmt1), len(fmt2))
for i in xrange(0, l, 16):
(tag1, ttype1, offset1, count1) = unpack("!4I", fmt1[i:i + 16])
(tag2, ttype2, offset2, count2) = unpack("!4I", fmt2[i:i + 16])
if tag1 != tag2 or ttype1 != ttype2 or offset1 != offset2 or \
count1 != count2:
print "tag:", tag1, tag2, i
if ttype1 != ttype2:
print "type:", ttype1, ttype2
if offset1 != offset2:
print "offset:", offset1, offset2
if count1 != count2:
print "count:", count1, count2
if fmt3 != fmt4:
print "fmt3/fmt4 differ"
if len(fmt3) != len(fmt4):
print "length: fmt3:", len(fmt3), "fmt4:", len(fmt4)
def writeRpmdb(pkg):
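    """Write the rpmdb header for pkg via writeHeader().

    Temporarily extends install_keys with tags that old rpm versions
    (or headers without an immutable region) store in the rpmdb, and
    restores install_keys again afterwards.
    """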
rpmversion = pkg["rpmversion"]
if rpmversion and rpmversion[:3] not in ("4.0", "3.0", "2.2"):
install_keys["archivesize"] = 1
region = "immutable"
if pkg["immutable"] == None:
region = "immutable1"
install_keys["providename"] = 1
install_keys["provideflags"] = 1
install_keys["provideversion"] = 1
install_keys["dirindexes"] = 1
install_keys["dirnames"] = 1
install_keys["basenames"] = 1
(indexNo, storeSize, fmt, fmt2) = writeHeader(pkg, pkg.hdr.hash, rpmdbtag,
region, {}, 1, pkg.rpmgroup)
if rpmversion and rpmversion[:3] not in ("4.0", "3.0", "2.2"):
del install_keys["archivesize"]
if pkg["immutable"] == None:
del install_keys["providename"]
del install_keys["provideflags"]
del install_keys["provideversion"]
del install_keys["dirindexes"]
del install_keys["dirnames"]
del install_keys["basenames"]
return (indexNo, storeSize, fmt, fmt2)
def readRpmdb(rpmdbpath, distroverpkg, releasever, configfiles, buildroot,
arch, archlist, specifyarch, verbose, checkfileconflicts, reposdirs):
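    """Read and sanity-check the rpmdb below buildroot + rpmdbpath.

    Verifies the index files against Packages, checks for duplicate and
    unexpected packages, compares installed headers with the configured
    repositories, verifies the sha1 header checksums and finally runs
    checkDeps(). Returns None on success and 1 on fatal errors.
    """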
from binascii import b2a_hex
(yumconfs, distroverpkg, releasever) = readYumConf(configfiles, reposdirs,
verbose, buildroot, rpmdbpath, distroverpkg, releasever)
# Read rpmdb:
if verbose:
print "Reading rpmdb, this can take some time..."
print "Reading %sPackages..." % rpmdbpath
if verbose > 2:
time1 = time.clock()
(packages, keyring, maxtid, pkgdata, swapendian) = readPackages(buildroot,
rpmdbpath, verbose)
if verbose:
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to read Packages", \
"(%d rpm packages)." % len(packages.keys())
print "Reading the other files in %s..." % rpmdbpath
if verbose > 2:
time1 = time.clock()
# Read other rpmdb files:
if verbose and sys.version_info < (2, 3):
print "If you use python-2.2 you can get the harmless output:", \
"'Python bsddb: close errno 13 in dealloc'."
basenames = readDb(swapendian, rpmdbpath + "Basenames")
conflictname = readDb(swapendian, rpmdbpath + "Conflictname")
dirnames = readDb(swapendian, rpmdbpath + "Dirnames", "bt")
filemd5s = readDb(swapendian, rpmdbpath + "Filemd5s")
group = readDb(swapendian, rpmdbpath + "Group")
installtid = readDb(swapendian, rpmdbpath + "Installtid", "bt", 1)
name = readDb(swapendian, rpmdbpath + "Name")
providename = readDb(swapendian, rpmdbpath + "Providename")
provideversion = readDb(swapendian, rpmdbpath + "Provideversion", "bt")
# We make "Pubkeys" optional since also pyrpmrebuilddb does not write
# it again:
if not os.access(rpmdbpath + "Pubkeys", os.R_OK):
if verbose:
print "Did not any Pubkey db file."
else:
#pubkeys =
readDb(swapendian, rpmdbpath + "Pubkeys")
requirename = readDb(swapendian, rpmdbpath + "Requirename")
requireversion = readDb(swapendian, rpmdbpath + "Requireversion", "bt")
sha1header = readDb(swapendian, rpmdbpath + "Sha1header")
sigmd5 = readDb(swapendian, rpmdbpath + "Sigmd5")
triggername = readDb(swapendian, rpmdbpath + "Triggername")
if verbose:
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to read the other files."
print "Checking data integrity..."
if verbose > 2:
time1 = time.clock()
# Checking data integrity of the rpmdb:
for tid in packages.iterkeys():
if tid > maxtid:
print "wrong tid:", tid
verifyStructure(verbose, packages, basenames, "basenames")
verifyStructure(verbose, packages, conflictname, "conflictname")
verifyStructure(verbose, packages, dirnames, "dirnames")
for x in filemd5s.itervalues():
for y in x.iterkeys():
x[y] = b2a_hex(x[y])
verifyStructure(verbose, packages, filemd5s, "filemd5s")
verifyStructure(verbose, packages, group, "group")
verifyStructure(verbose, packages, installtid, "installtid")
verifyStructure(verbose, packages, name, "name", 0)
verifyStructure(verbose, packages, providename, "providename")
verifyStructure(verbose, packages, provideversion, "provideversion")
#verifyStructure(verbose, packages, pubkeys, "pubkeys")
verifyStructure(verbose, packages, requirename, "requirename")
verifyStructure(verbose, packages, requireversion, "requireversion")
verifyStructure(verbose, packages, sha1header, "install_sha1header", 0)
verifyStructure(verbose, packages, sigmd5, "install_md5", 0)
verifyStructure(verbose, packages, triggername, "triggername")
arch_hash = setMachineDistance(arch, archlist)
checkdupes = {}
checkevr = {}
# Find out "arch" and set "checkdupes".
for pkg in packages.itervalues():
if (not specifyarch and rpmdbpath != "/var/lib/rpm/" and
pkg["name"] in kernelpkgs):
# This would apply if we e.g. go from i686 -> x86_64, but
# would also go from i686 -> s390 if such a kernel would
# accidentally be installed. Good enough for the normal case.
if arch_hash.get(pkg["arch"]) == None:
arch = pkg["arch"]
arch_hash = setMachineDistance(arch)
print "Change 'arch' setting to be:", arch
# XXX This only checks the name, not the "Provides:":
if (yumconfs and pkg["name"] in distroverpkg and
pkg["version"] != releasever):
print "releasever could also be", pkg["version"], "instead of", \
releasever
if not pkg.isInstallonly():
checkdupes.setdefault("%s.%s" % (pkg["name"], pkg["arch"]),
[]).append(pkg)
checkevr.setdefault("%s" % pkg["name"], []).append(pkg)
# Check "arch" and dupes:
for pkg in packages.itervalues():
if (pkg["name"] != "gpg-pubkey" and
arch_hash.get(pkg["arch"]) == None):
print "Warning: did not expect package with this arch: %s" % \
pkg.getFilename()
if (pkg["arch"] != "noarch" and
"%s.noarch" % pkg["name"] in checkdupes):
print "Warning: noarch and arch-dependent package installed:", \
pkg.getFilename()
for (pkg, value) in checkdupes.iteritems():
if len(value) > 1:
print "Warning: more than one package installed for %s." % pkg
for (pkg, value) in checkevr.iteritems():
if len(value) <= 1:
continue
p = value[0]
evr = (p["epoch"], p["version"], p["release"])
for q in value[1:]:
if evr != (q["epoch"], q["version"], q["release"]):
                print p.getFilename(), "has a different epoch/version/release", \
                    "than", q.getFilename()
# Read in repositories to compare packages:
if verbose > 2 and configfiles:
time3 = time.clock()
repos = readRepos(yumconfs, releasever, arch, 1, 0, verbose)
if repos == None:
return 1
if verbose > 2 and configfiles:
print "Needed", time.clock() - time3, "seconds to read the repos."
for (tid, pkg) in packages.iteritems():
if pkg["name"] == "gpg-pubkey":
continue
rpmversion = pkg["rpmversion"]
# Check if we could write the rpmdb data again.
(indexNo, storeSize, fmt, fmt2) = writeRpmdb(pkg)
lead = pack("!2I", indexNo, storeSize)
data = "".join([lead, fmt, fmt2])
if len(data) % 4 != 0:
print "rpmdb header is not aligned to 4"
if data != pkgdata[tid]:
print "writeRpmdb() would not write the same rpmdb data for", \
pkg["name"], "(rpm-%s)" % rpmversion
if verbose >= 3:
diffFmt(pkg.hdrdata[3], fmt, pkg.hdrdata[4], fmt2)
# Try to just copy the immutable region to verify the sha1.
immutable = pkg.getImmutableRegion()
if immutable:
(indexNo, storeSize, fmt, fmt2) = immutable
else:
# If we cannot use the immutable region, try to write our own
# header again.
pkg.sig = HdrIndex()
if pkg["archivesize"] != None:
pkg.sig["payloadsize"] = pkg["archivesize"]
if rpmversion and rpmversion[:3] not in ("4.0", "3.0", "2.2"):
del pkg["archivesize"]
region = "immutable"
if pkg["immutable1"] != None:
region = "immutable1"
(indexNo, storeSize, fmt, fmt2) = writeHeader(None, pkg.hdr.hash,
rpmdbtag, region, install_keys, 0, pkg.rpmgroup)
found = 0
nevra = pkg.getNEVRA0()
for r in repos:
if nevra in r.pkglist:
repopkg = r.pkglist[nevra]
headerend = None
if repopkg["rpm:header-range:end"]:
headerend = repopkg["rpm:header-range:end"]
rpm = ReadRpm(repopkg.filename)
if rpm.readHeader(rpmsigtag, rpmtag, 1, headerend=headerend):
print "Cannot read %s.\n" % repopkg.filename
continue
rpm.closeFd()
if rpm.hdrdata[3] != fmt or rpm.hdrdata[4] != fmt2:
print "Rpm %s in repo does not match." % repopkg.filename
continue
found = 1
# Use the rpm header to write again a rpmdb entry and compare
# that again to the currently existing rpmdb header.
# We should try to write some of these ourselves:
for s in ("installtime", "filestates", "instprefixes",
"installcolor", "installtid"):
if pkg[s] != None:
rpm[s] = pkg[s]
#rpm["installcolor"] = (getInstallColor(arch),)
rpm.genRpmdbHeader()
(_, _, fmta, fmta2) = writeRpmdb(rpm)
if pkg.hdrdata[3] != fmta or pkg.hdrdata[4] != fmta2:
if verbose > 2:
print "Could not write a new rpmdb for %s." \
% repopkg.filename
if verbose >= 4:
diffFmt(pkg.hdrdata[3], fmta, pkg.hdrdata[4], fmta2)
continue
break
if found == 0 and configfiles:
print "Warning: package not found in the repositories:", nevra
# Verify the sha1 crc of the normal header data. (Signature
# data does not have an extra crc.)
sha1header = pkg["install_sha1header"]
if sha1header == None:
if verbose:
print "Warning: package", pkg.getFilename(), \
"does not have a sha1 checksum."
continue
lead = pack("!8s2I", "\x8e\xad\xe8\x01\x00\x00\x00\x00",
indexNo, storeSize)
ctx = sha1.new()
ctx.update(lead)
ctx.update(fmt)
ctx.update(fmt2)
if ctx.hexdigest() != sha1header:
print pkg.getFilename(), \
"bad sha1: %s / %s" % (sha1header, ctx.hexdigest())
checkDeps(packages.values(), checkfileconflicts, 0)
if verbose:
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to check the rpmdb data."
print "Done with checkrpmdb."
return None
def checkSrpms(ignoresymlinks):
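    """Scan a hardcoded list of mirror directories for src.rpms and
    report buildtime inversions and duplicate packages."""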
directories = [
"/var/www/html/mirror/updates-rhel/2.1",
"/var/www/html/mirror/updates-rhel/3",
"/var/www/html/mirror/updates-rhel/4",
"/mnt/hdb4/data/cAos/3.5/updates/SRPMS",
"/home/mirror/centos/3.6/updates/SRPMS",
"/mnt/hdb4/data/cAos/4.1/os/SRPMS",
"/mnt/hdb4/data/cAos/4.1/updates/SRPMS",
"/home/mirror/centos/4.2/os/SRPMS",
"/home/mirror/centos/4.2/updates/SRPMS",
"/home/mirror/scientific/SRPMS/vendor/errata",
"/home/mirror/scientific/SRPMS/vendor/original",
"/home/mirror/scientific/SRPMS"]
for d in directories:
if not os.path.isdir(d):
continue
rpms = findRpms(d, ignoresymlinks)
rpms = readRpm(rpms, rpmsigtag, rpmtag)
h = {}
for rpm in rpms:
h.setdefault(rpm["name"], []).append(rpm)
for v in h.itervalues():
v.sort(pkgCompare)
for i in xrange(len(v) - 1):
if (v[i].hdr.getOne("buildtime") >
v[i + 1].hdr.getOne("buildtime")):
print "buildtime inversion:", v[i].filename, \
v[i + 1].filename
directories.append("/var/www/html/mirror/rhn/SRPMS")
rpms = []
for d in directories:
if os.path.isdir(d):
rpms.extend(findRpms(d, ignoresymlinks))
rpms = readRpm(rpms, rpmsigtag, rpmtag)
h = {}
for rpm in rpms:
h.setdefault(rpm["name"], []).append(rpm)
for v in h.itervalues():
v.sort(pkgCompare)
i = 0
while i < len(v) - 1:
if pkgCompare(v[i], v[i + 1]) == 0:
if not sameSrcRpm(v[i], v[i + 1]):
print "duplicate rpms:", v[i].filename, v[i + 1].filename
v.remove(v[i])
else:
i += 1
def cmpA(h1, h2):
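    """Sort helper: compare entries by their first tuple element."""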
return cmp(h1[0], h2[0])
def checkArch(path, ignoresymlinks):
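    """Print a table marking the architectures on which each of the
    newest src.rpms below path would not get built."""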
print "Mark the arch where a src.rpm would not get built:\n"
arch = ["i386", "x86_64", "ia64", "ppc", "s390", "s390x"]
rpms = findRpms(path, ignoresymlinks)
rpms = readRpm(rpms, rpmsigtag, rpmtag)
# Only look at the newest src.rpms.
h = {}
for rpm in rpms:
h.setdefault(rpm["name"], []).append(rpm)
rpmnames = h.keys()
rpmnames.sort()
for r in rpmnames:
h[r] = [selectNewestRpm(h[r], {}, 0)]
# Print table of archs to look at.
for i in xrange(len(arch) + 2):
s = ""
for a in arch:
if len(a) > i:
s = "%s%s " % (s, a[i])
else:
s = s + " "
print "%29s %s" % ("", s)
showrpms = []
for rp in rpmnames:
srpm = h[rp][0]
builds = {}
showit = 0
n = 1
nn = 0
for a in arch:
if srpm.buildOnArch(a):
builds[a] = 1
nn += n
else:
builds[a] = 0
showit = 1
n = n + n
if showit:
showrpms.append((nn, builds, srpm))
showrpms.sort(cmpA)
for (_, builds, srpm) in showrpms:
s = ""
for a in arch:
if builds[a] == 1:
s = "%s " % s
else:
s = "%sx " % s
print "%29s %s" % (srpm["name"], s)
def checkSymlinks(repo):
"""Check for dangling symlinks."""
allfiles = {}
goodlinks = {}
dangling = []
# collect all files
for rpm in repo:
for f in rpm.filenames:
allfiles[f] = None
for rpm in repo:
if not rpm.filenames:
continue
for (f, mode, link) in zip(rpm.filenames, rpm["filemodes"],
rpm["filelinktos"]):
if not S_ISLNK(mode):
continue
if link[:1] != "/":
link = "%s/%s" % (pathdirname(f), link)
link = os.path.normpath(link)
if link in allfiles:
goodlinks[f] = link
continue
dangling.append((rpm["name"], f, link))
# resolve possible dangling links
dangling.sort()
for (name, f, link) in dangling:
if resolveLink(goodlinks, link) not in allfiles:
print "%s has dangling symlink from %s to %s" % (name, f, link)
def resolveLink(goodlinks, link):
"""Resolve link to file, use information stored in
dictionary of goodlinks"""
path = []
# process all path elements
for elem in link.split(os.sep):
path.append(elem)
tmppath = os.path.join(os.sep, *path)
# If it's a link, replace already processed path:
if tmppath in goodlinks:
path = goodlinks[tmppath].split(os.sep)
return os.path.join(os.sep, *path)
def checkDirs(repo):
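    """Report files in suspicious locations: /etc/init.d/ scripts,
    debug files in non-debuginfo packages, patch .orig leftovers,
    CVS directories and editor backup files."""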
backupfile = re.compile(".*~$|.*#[^/]+#$")
# collect all directories
for rpm in repo:
if not rpm.filenames:
continue
for f in rpm.filenames:
# check if startup scripts are in wrong directory
if f.startswith("/etc/init.d/") and not opensuse:
print "init.d:", rpm.filename, f
# output any package having debug stuff included
if (not rpm["name"].endswith("-debuginfo") and
f.startswith("/usr/lib/debug")):
print "debug stuff in normal package:", rpm.filename, f
# output files coming from patch:
if (f.endswith(".orig") or f.endswith(".orig.gz")) and \
not f.startswith("/usr/share/doc/"):
print "maybe patch .orig file in package:", rpm.filename, f
# files coming from cvs:
if f.endswith("/CVS"):
print "maybe includes cvs dir:", rpm.filename, f
# maybe backup files:
if backupfile.match(f):
print "maybe includes backup file:", rpm.filename, f
def checkProvides(repo):
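    """Report Provides: entries added by more than one package name
    that are actually required by some package."""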
provides = {}
requires = {}
for rpm in repo:
for r in rpm.getRequires():
requires.setdefault(r[0], []).append(rpm.getFilename())
if not rpm.issrc:
for p in rpm.getProvides():
provides.setdefault(p, []).append(rpm)
if provides.keys():
print "Duplicate provides:"
for (p, value) in provides.iteritems():
# only look at duplicate keys
if len(value) <= 1:
continue
# if no require can match this, ignore duplicates
if p[0] not in requires:
continue
x = []
for rpm in value:
#x.append(rpm.getFilename())
if rpm["name"] not in x:
x.append(rpm["name"])
if len(x) <= 1:
continue
print p, x
def checkScripts(repo):
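    """Grep the install/erase/verify scripts for suspicious "RPM" and
    "%" strings that often indicate unexpanded rpm macros."""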
comment = re.compile("^\s*#")
for rpm in repo:
for s in ("postin", "postun", "prein", "preun", "verifyscript"):
data = rpm[s]
if data == None:
continue
if data.find("RPM") != -1:
for line in data.split("\n"):
if line.find("RPM") == -1 or comment.match(line):
continue
# ignore RPM_INSTALL_PREFIX and "rpm --import"
if (line.find("RPM_INSTALL_PREFIX") != -1 or
line.find("rpm --import") != -1):
continue
print rpm.filename, "contains \"RPM\" as string"
break
if data.find("%") != -1:
for line in data.split("\n"):
if line.find("%") == -1 or comment.match(line):
continue
# ignore ${var%extension} constructs
if re.compile(".*\${.+%.+}").match(line):
continue
# ignore "rpm --query --queryformat" and "rpm --eval"
if (line.find("rpm --query --queryformat") != -1 or
line.find("rpm --eval") != -1):
continue
# ignore "find -printf" (cyrus-imapd):
#if line.find("-printf") != -1:
# continue
# ignore `date +string`
if re.compile(".*date \'?\\+").match(line):
continue
# openSuSE "kmp" rpms
if line.find("set --") != -1:
continue
if line.find("printf") != -1:
continue
print rpm.filename, "contains \"%\""
break
def Python2Pyrex():
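    """Filter Python source on stdin into Pyrex source on stdout.

    Lines between "# python-only" markers are dropped and lines between
    "# pyrex-code" markers have their first non-space character
    (presumably a leading "#") stripped. Augmented assignments like
    "a += b" are rewritten as "a = a + (b)", presumably because the
    Pyrex target did not support them.
    """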
delete = 0
pyrexcode = 0
for line in sys.stdin.readlines():
l = line.strip()
if delete:
if l == "# python-only-end":
delete = 0
elif l == "# python-only":
delete = 1
elif l == "# pyrex-code":
pyrexcode = 1
elif l == "# pyrex-code-end":
pyrexcode = 0
elif pyrexcode:
while line[0] and line[0] == " ":
sys.stdout.write(line[0])
line = line[1:]
sys.stdout.write(line[1:])
elif l.find(" " + "+= ") != -1:
x = line.find(" " + "+= ")
sys.stdout.write(line[:x])
y = line[:x].strip()
sys.stdout.write(" = " + y + " + (")
sys.stdout.write(line[x + 4:-1] + ")\n")
elif l.find(" " + "-= ") != -1:
x = line.find(" " + "-= ")
sys.stdout.write(line[:x])
y = line[:x].strip()
sys.stdout.write(" = " + y + " - (")
sys.stdout.write(line[x + 4:-1] + ")\n")
elif l.find(" " + "|= ") != -1:
x = line.find(" " + "|= ")
sys.stdout.write(line[:x])
y = line[:x].strip()
sys.stdout.write(" = " + y + " | (")
sys.stdout.write(line[x + 4:-1] + ")\n")
elif l.find(" " + "&= ") != -1:
x = line.find(" " + "&= ")
sys.stdout.write(line[:x])
y = line[:x].strip()
sys.stdout.write(" = " + y + " & (")
sys.stdout.write(line[x + 4:-1] + ")\n")
else:
sys.stdout.write(line)
def usage():
prog = sys.argv[0]
print
print prog, "- Version:", __version__, "-", __doc__
print
print "To check your rpm database:"
print prog, "[--verbose|-v|--quiet|-q] [--rpmdbpath=/var/lib/rpm/] " \
+ "--checkrpmdb"
print "Further opotions:"
print " [--enablerepos]: read in /etc/yum.conf and /etc/yum.repos.d/"
print " [--fileconflicts]: check rpmdb for fileconflicts"
print " [-c /etc/yum.conf] [--releasever 4]"
print
print "Verify and sanity check rpm packages:"
print prog, "[--strict] [--nopayload] [--nodigest] \\"
print " /mirror/fedora/development/i386/Fedora/RPMS"
print "find /mirror/ -name \"*.rpm\" -type f -print0 2>/dev/null \\"
print " | xargs -0", prog, "[--nodigest] [--nopayload]"
print "locate '*.rpm' | xargs", prog, "[--nodigest] [--nopayload]"
print "Options for this are:"
print " [--strict]: add additional checks for the Fedora Core" \
+ " development tree"
print " [--nodigest]: do not verify sha1/md5sum for header+payload"
print " [--nopayload]: do not read in the compressed cpio" \
+ " filedata (payload)"
print " [-c /etc/yum.conf]: experimental option to read repositories"
print " [--releasever 4]: set releasever for reading yum.conf files"
print
print "Diff two src.rpm packages:"
print prog, "[--explode] --diff 1.src.rpm 2.src.rpm"
print
print "Extract src.rpm or normal rpm packages:"
print prog, "[--buildroot=/chroot] --extract *.rpm"
print
print "Check src packages on which arch they would be excluded:"
print prog, "--checkarch /mirror/fedora/development/SRPMS"
print
def main():
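    """Parse the command line options and dispatch to the selected mode."""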
import getopt
global cachedir, opensuse # pylint: disable-msg=W0603
if len(sys.argv) <= 1:
usage()
return 0
(_, _, kernelversion, _, arch) = os.uname()
archlist = None
owner = None
if os.geteuid() == 0:
owner = 1
homedir = os.environ.get("HOME", "")
if homedir and not owner:
cachedir = homedir + "/.pyrpm/cache/"
if not os.path.isdir(cachedir):
print "Created the directory %s to cache files locally." % cachedir
makeDirs(cachedir)
verbose = 2
ignoresymlinks = 0
configfiles = []
distroverpkg = ("fedora-release", "redhat-release")
#assumeyes = 0
repo = []
strict = 0
nodigest = 0
payload = 1
wait = 0
verify = 1
small = 0
explode = 0
diff = 0
treediff = 0
extract = 0
excludes = ""
checksrpms = 0
rpmdbpath = "/var/lib/rpm/"
withdb = 0
reposdirs = []
checkarch = 0
checkfileconflicts = 0
runorderer = 0
specifyarch = 0
buildroot = ""
checkrpmdb = 0
checkoldkernel = 0
numkeepkernels = 3
checkdeps = 0
completerepo = 0
baseurl = None
createrepo = 0
groupfile = "comps.xml"
mercurial = 0
pyrex = 0
releasever = ""
updaterpms = 0
exactarch = 1
testmirrors = 0
try:
(opts, args) = getopt.getopt(sys.argv[1:], "c:hqvy?",
["help", "verbose", "quiet", "arch=", "archlist=", "releasever=",
"distroverpkg", "strict", "ignoresymlinks",
"digest", "nodigest", "payload", "nopayload",
"wait", "noverify", "small", "explode", "diff", "treediff",
"extract",
"excludes=", "nofileconflicts", "fileconflicts", "runorderer",
"updaterpms", "reposdir=", "disablereposdir", "enablerepos",
"checksrpms", "checkarch", "rpmdbpath=", "dbpath=", "withdb",
"cachedir=", "checkrpmdb", "checkoldkernel", "numkeepkernels=",
"checkdeps", "completerepo", "buildroot=", "installroot=",
"root=", "version", "baseurl=", "createrepo", "groupfile=",
"mercurial", "pyrex", "testmirrors", "opensuse"])
except getopt.GetoptError, msg:
print "Error:", msg
return 1
for (opt, val) in opts:
if opt in ("-?", "-h", "--help"):
usage()
return 0
elif opt in ("-v", "--verbose"):
verbose += 1
elif opt in ("-q", "--quiet"):
verbose = 0
elif opt == "--ignoresymlinks":
ignoresymlinks = 1
elif opt == "-c":
configfiles.append(val)
elif opt == "--arch":
arch = val
specifyarch = 1
elif opt == "--archlist":
archlist = val.split(",")
arch = archlist[0]
archlist = archlist[1:]
specifyarch = 1
elif opt == "--releasever":
releasever = val
elif opt == "--distroverpkg":
distroverpkg = val.split(",")
elif opt == "-y":
#assumeyes = 1
pass
elif opt == "--strict":
strict = 1
elif opt == "--digest":
nodigest = 0
elif opt == "--nodigest":
nodigest = 1
elif opt == "--payload":
payload = 1
elif opt == "--nopayload":
payload = 0
elif opt == "--nofileconflicts":
checkfileconflicts = 0
elif opt == "--fileconflicts":
checkfileconflicts = 1
elif opt == "--runorderer":
runorderer = 1
elif opt == "--updaterpms":
updaterpms = 1
elif opt == "--wait":
wait = 1
elif opt == "--noverify":
verify = 0
elif opt == "--small":
small = 1
elif opt == "--explode":
explode = 1
elif opt == "--diff":
diff = 1
elif opt == "--treediff":
treediff = 1
elif opt == "--extract":
extract = 1
elif opt == "--excludes":
excludes += " " + val
elif opt == "--checksrpms":
checksrpms = 1
elif opt == "--checkarch":
checkarch = 1
elif opt in ("--rpmdbpath", "--dbpath"):
rpmdbpath = val
if rpmdbpath[-1:] != "/":
rpmdbpath += "/"
elif opt == "--withdb":
withdb = 1
elif opt == "--cachedir":
cachedir = val
if cachedir[-1:] != "/":
cachedir += "/"
elif opt == "--checkrpmdb":
checkrpmdb = 1
elif opt == "--checkoldkernel":
checkoldkernel = 1
elif opt == "--numkeepkernels":
numkeepkernels = int(val)
elif opt == "--checkdeps":
checkdeps = 1
elif opt == "--completerepo":
completerepo = 1
elif opt in ("--buildroot", "--installroot", "--root"):
#if val[:1] != "/":
# print "buildroot should start with a /"
# return 1
buildroot = os.path.abspath(val)
elif opt == "--version":
print sys.argv[0], "version:", __version__
return 0
elif opt == "--baseurl":
baseurl = val
elif opt == "--reposdir":
if val not in reposdirs:
reposdirs.append(val.split(" \t,;"))
elif opt == "--disablereposdir":
reposdirs = []
elif opt == "--enablerepos":
configfiles.append("/etc/yum.conf")
reposdirs.extend(["/etc/yum.repos.d", "/etc/yum/repos.d"])
elif opt == "--createrepo":
createrepo = 1
elif opt == "--groupfile":
groupfile = val
elif opt == "--mercurial":
mercurial = 1
elif opt == "--pyrex":
pyrex = 1
elif opt == "--testmirrors":
testmirrors = 1
elif opt == "--opensuse":
opensuse = 1
    # Select what we want to do here:
if diff:
diff = diffTwoSrpms(args[0], args[1], explode)
if diff != "":
print diff
elif treediff:
print TreeDiff(args[0], args[1])
elif extract:
db = None
if withdb:
db = RpmDB(buildroot, rpmdbpath)
for a in args:
extractRpm(a, buildroot, owner, db)
elif checksrpms:
checkSrpms(ignoresymlinks)
elif checkarch:
checkArch(args[0], ignoresymlinks)
elif checkrpmdb:
if readRpmdb(rpmdbpath, distroverpkg, releasever, configfiles,
buildroot, arch, archlist, specifyarch, verbose,
checkfileconflicts, reposdirs):
return 1
elif checkoldkernel:
mykernelpkgs = kernelpkgs[:]
for i in kernelpkgs:
mykernelpkgs.append(i + "-devel")
ver = kernelversion
for s in ("bigmem", "enterprise", "smp", "hugemem", "PAE",
"guest", "hypervisor", "xen0", "xenU", "xen"):
if ver.endswith(s):
ver = ver[:-len(s)]
# also remove all lower case letters at the end now?
try:
(v, r) = ver.split("-", 1)
except ValueError:
print "Failed to read version and release of the", \
"currently running kernel."
(v, r) = (None, None)
(packages, keyring, maxtid, pkgdata, swapendian) = \
readPackages(buildroot, rpmdbpath, verbose, 0, importanttags)
kernels = []
for pkg in packages.itervalues():
if pkg["name"] in mykernelpkgs:
kernels.append(pkg)
kernels.sort(pkgCompare)
kernels.reverse()
(vr, removekern) = ([], [])
for pkg in kernels:
if (pkg["version"], pkg["release"]) not in vr:
vr.append( (pkg["version"], pkg["release"]) )
if (len(vr) > numkeepkernels and
(v, r) != (pkg["version"], pkg["release"])):
removekern.append(pkg)
if verbose > 2:
print "You have the following kernels installed:"
for pkg in kernels:
print pkg.getFilename()
print "The following older kernels should be removed:"
for pkg in removekern:
print pkg.getFilename2()
elif createrepo:
for a in args:
if not os.path.isdir(a):
print "Createrepo needs a directory name:", a
break
repo = RpmRepo([a], excludes, verbose)
repo.createRepo(baseurl, ignoresymlinks, groupfile)
elif mercurial:
createMercurial(verbose)
elif pyrex:
Python2Pyrex()
elif testmirrors:
testMirrors(verbose, args)
elif updaterpms:
# If no config file specified, default to /etc/yum.conf and also
# the default directories for additional yum repos.
if not configfiles:
configfiles.append("/etc/yum.conf")
if not reposdirs:
reposdirs = ["/etc/yum.repos.d", "/etc/yum/repos.d"]
(yumconfs, distroverpkg, releasever) = readYumConf(configfiles,
reposdirs, verbose, buildroot, rpmdbpath, distroverpkg,
releasever)
if yumconfs and yumconfs[0].get("main", {}).get("exactarch") != None:
exactarch = int(yumconfs[0].get("main", {}).get("exactarch"))
# Read all packages in rpmdb.
if verbose > 2:
time1 = time.clock()
if verbose > 1:
print "Reading the rpmdb in %s." % rpmdbpath
(packages, keyring, maxtid, pkgdata, swapendian) = \
readPackages(buildroot, rpmdbpath, verbose, 0, importanttags)
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to read the rpmdb", \
"(%d rpm packages)." % len(packages.keys())
# Read all repositories.
if verbose > 2:
time1 = time.clock()
repos = readRepos(yumconfs, releasever, arch, 1, 0, verbose, fast=0)
if repos == None:
return 1
if verbose > 2:
time2 = time.clock()
numrpms = 0
for r in repos:
numrpms += len(r.pkglist.keys())
print "Needed", time2 - time1, "seconds to read the repos", \
"(%d rpm packages)." % numrpms
# For timing purposes also read filelists:
if verbose > 2:
time1 = time.clock()
for repo in repos:
repo.importFilelist()
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "secs to read the repo filelists."
# Sort repo packages to only keep the newest.
if verbose > 2:
time1 = time.clock()
pkglist = []
for r in repos:
pkglist.extend(r.pkglist.values())
arch_hash = setMachineDistance(arch, archlist)
pkglist = getPkgsNewest(pkglist, arch, arch_hash, verbose, 1)
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to sort the repos."
# XXX: Here we should also look at Obsoletes:
# Select rpms to update:
if verbose > 2:
time1 = time.clock()
h = {}
# Read all packages from rpmdb, then add all newer packages
        # from the repositories. This order makes sure rpmdb packages
# are selected over their same versions in the repos.
for rpm in packages.itervalues():
if rpm["name"] == "gpg-pubkey":
continue
rarch = rpm["arch"]
if not exactarch:
rarch = buildarchtranslate.get(rarch, rarch)
h.setdefault( (rpm["name"], rarch) , []).append(rpm)
for rpm in pkglist:
rarch = rpm["arch"]
if not exactarch:
rarch = buildarchtranslate.get(rarch, rarch)
key = (rpm["name"], rarch)
if key in h:
h[key].append(rpm)
# Now select which rpms to install/erase:
installrpms = []
eraserpms = []
for r in h.itervalues():
if r[0].isInstallonly():
# XXX check if there is a newer "kernel" around
continue
newest = selectNewestRpm(r, arch_hash, verbose)
if newest == r[0]:
continue
eraserpms.append(r[0])
installrpms.append(newest)
# Check noarch constraints.
#if None:
# for rpms in h.itervalues():
# newest = selectNewestRpm(rpms, arch_hash, verbose)
# if newest["arch"] == "noarch":
# for r in rpms:
# if r != newest:
# pkgs.remove(r)
# else:
# for r in rpms:
# if r["arch"] == "noarch":
# pkgs.remove(r)
#installrpms = getPkgsNewest(rtree.getPkgs(), arch, arch_hash,
# verbose, 0)
#checkDeps(installrpms, checkfileconflicts, runorderer)
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to check for updates."
if verbose > 1:
if not installrpms:
print "No package updates found."
for rpm in installrpms:
print "Updating to %s." % rpm.getFilename()
else:
keepdata = 1
hdrtags = rpmtag
if verify == 0 and nodigest == 1:
keepdata = 0
if small:
hdrtags = importanttags
if configfiles and verbose > 2:
time1 = time.clock()
(yumconfs, distroverpkg, releasever) = readYumConf(configfiles,
reposdirs, verbose, buildroot, rpmdbpath, distroverpkg,
releasever)
repos = readRepos(yumconfs, releasever, arch, 0, 1, verbose)
if configfiles and verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to read the repos."
if repos == None:
return 1
headerend = {}
for r in repos:
for p in r.pkglist.itervalues():
args.append(p.filename)
if p["rpm:header-range:end"]:
headerend[p.filename] = p["rpm:header-range:end"]
time1 = time.clock()
checkarchs = []
for a in args:
a = Uri2Filename(a)
b = [a]
if not a.endswith(".rpm") and not isUrl(a) and os.path.isdir(a):
b = findRpms(a, ignoresymlinks)
for a in b:
#print a
rpm = verifyRpm(a, verify, strict, payload, nodigest, hdrtags,
keepdata, headerend.get(a))
if rpm == None:
continue
#f = rpm["filenames"]
#if f:
# print rpm.getFilename()
# print f
if checkdeps or completerepo or strict or wait:
if (rpm["name"] in kernelpkgs and not rpm.issrc and
rpm["arch"] not in checkarchs):
checkarchs.append(rpm["arch"])
repo.append(rpm)
# python-only
del rpm
# python-only-end
if verbose > 2:
time2 = time.clock()
print "Needed", time2 - time1, "seconds to read", len(repo), \
"rpm packages."
if strict:
for rpm in repo:
rpm.filenames = rpm.getFilenames()
checkDirs(repo)
if not opensuse:
checkSymlinks(repo)
checkScripts(repo)
if strict or checkdeps:
if specifyarch:
checkarchs = [arch, ]
if checkarchs:
if excludes:
                    # Split on whitespace/comma/semicolon separators, not
                    # on the literal string " \t,;".
                    excludes = re.split(r"[ \t,;]+", excludes.strip(" \t,;"))
for arch in checkarchs:
time1 = time.clock()
print "Check as if kernel has the", \
"architecture \"%s\" now:" % arch
arch_hash = setMachineDistance(arch, archlist)
installrpms = getPkgsNewest(repo, arch, arch_hash,
verbose, 0, 1)
if excludes:
for excluderpm in parsePackages(installrpms, excludes):
if excluderpm in installrpms:
print "Excluding %s." % excluderpm.getFilename()
installrpms.remove(excluderpm)
if strict:
checkProvides(installrpms)
checkDeps(installrpms, checkfileconflicts, runorderer,
verbose)
time2 = time.clock()
print "Needed", time2 - time1, "sec to check this tree."
else:
print "No arch defined to check, are kernels missing?"
if completerepo:
checkRepo(repo)
if wait:
print "Ready."
time.sleep(30)
return 0
def run_main(mymain):
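    """Run mymain(), optionally under the hotshot profiler when the
    first argument is --hotshot, and exit with its return value."""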
dohotshot = 0
if len(sys.argv) >= 2 and sys.argv[1] == "--hotshot":
dohotshot = 1
sys.argv.pop(1)
if dohotshot:
import hotshot, hotshot.stats
htfilename = mkstemp_file(tmpdir)[1]
prof = hotshot.Profile(htfilename)
prof.runcall(mymain)
prof.close()
# python-only
del prof
# python-only-end
print "Starting profil statistics. This takes some time..."
s = hotshot.stats.load(htfilename)
s.strip_dirs()
s.sort_stats("time").print_stats(100)
s.sort_stats("cumulative").print_stats(100)
s.sort_stats("calls").print_stats(100)
os.unlink(htfilename)
else:
ret = mymain()
if ret != None:
sys.exit(ret)
if __name__ == "__main__":
#checkCSV()
run_main(main)
# vim:ts=4:sw=4:showmatch:expandtab
|
kholia/pyrpm
|
scripts/oldpyrpm.py
|
Python
|
gpl-2.0
| 281,247
|
[
"VisIt"
] |
8c7f4911b893e5757b2a348be30be2ca6b880831a835549dbfda401708ab38ba
|
#
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Elemental masses (most common isotope), symbols, and atomic numbers from psi4.
"""
_temp_element = ["GHOST", "HYDROGEN", "HELIUM", "LITHIUM", "BERYLLIUM",
"BORON", "CARBON", "NITROGEN", "OXYGEN", "FLUORINE",
"NEON", "SODIUM", "MAGNESIUM", "ALUMINUM", "SILICON",
"PHOSPHORUS", "SULFUR", "CHLORINE", "ARGON", "POTASSIUM",
"CALCIUM", "SCANDIUM", "TITANIUM", "VANADIUM", "CHROMIUM",
"MANGANESE", "IRON", "COBALT", "NICKEL", "COPPER",
"ZINC", "GALLIUM", "GERMANIUM", "ARSENIC", "SELENIUM",
"BROMINE", "KRYPTON", "RUBIDIUM", "STRONTIUM", "YTTRIUM",
"ZIRCONIUM", "NIOBIUM", "MOLYBDENUM", "TECHNETIUM", "RUTHENIUM",
"RHODIUM", "PALLADIUM", "SILVER", "CADMIUM", "INDIUM",
"TIN", "ANTIMONY", "TELLURIUM", "IODINE", "XENON",
"CESIUM", "BARIUM", "LANTHANUM", "CERIUM", "PRASEODYMIUM",
"NEODYMIUM", "PROMETHIUM", "SAMARIUM", "EUROPIUM", "GADOLINIUM",
"TERBIUM", "DYSPROSIUM", "HOLMIUM", "ERBIUM", "THULIUM",
"YTTERBIUM", "LUTETIUM", "HAFNIUM", "TANTALUM", "TUNGSTEN",
"RHENIUM", "OSMIUM", "IRIDIUM", "PLATINUM", "GOLD",
"MERCURY", "THALLIUM", "LEAD", "BISMUTH", "POLONIUM",
"ASTATINE", "RADON", "FRANCIUM", "RADIUM", "ACTINIUM",
"THORIUM", "PROTACTINIUM", "URANIUM", "NEPTUNIUM", "PLUTONIUM",
"AMERICIUM", "CURIUM", "BERKELIUM", "CALIFORNIUM", "EINSTEINIUM",
"FERMIUM", "MENDELEVIUM", "NOBELIUM", "LAWRENCIUM" "RUTHERFORDIUM",
"DUBNIUM", "SEABORGIUM", "BOHRIUM"]
_temp_symbol = ["X", "H", "HE", "LI", "BE", "B", "C", "N", "O", "F", "NE", "NA", "MG",
"AL", "SI", "P", "S", "CL", "AR", "K", "CA", "SC", "TI", "V", "CR", "MN", "FE", "CO",
"NI", "CU", "ZN", "GA", "GE", "AS", "SE", "BR", "KR", "RB", "SR", "Y", "ZR", "NB",
"MO", "TC", "RU", "RH", "PD", "AG", "CD", "IN", "SN", "SB", "TE", "I", "XE", "CS",
"BA", "LA", "CE", "PR", "ND", "PM", "SM", "EU", "GD", "TB", "DY", "HO", "ER", "TM",
"YB", "LU", "HF", "TA", "W", "RE", "OS", "IR", "PT", "AU", "HG", "TL", "PB", "BI",
"PO", "AT", "RN", "FR", "RA", "AC", "TH", "PA", "U", "NP", "PU", "AM", "CM", "BK",
"CF", "ES", "FM", "MD", "NO", "LR", "RF", "DB", "SG", "BH", "HS", "MT", "DS", "RG",
"UUB", "UUT", "UUQ", "UUP", "UUH", "UUS", "UUO"]
_temp_z = list(range(0, 108))
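# These _temp_* lists are parallel and indexed by atomic number Z (index 0
# is the ghost atom); _temp_element and _temp_z cover Z = 0..107, while
# _temp_symbol also lists placeholder symbols up through UUO (Z = 118).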
_temp_mass = [
0., 1.00782503207, 4.00260325415, 7.016004548, 9.012182201, 11.009305406,
12, 14.00307400478, 15.99491461956, 18.998403224, 19.99244017542,
22.98976928087, 23.985041699, 26.981538627, 27.97692653246, 30.973761629,
31.972070999, 34.968852682, 39.96238312251, 38.963706679, 39.962590983,
44.955911909, 47.947946281, 50.943959507, 51.940507472, 54.938045141,
55.934937475, 58.933195048, 57.935342907, 62.929597474, 63.929142222,
68.925573587, 73.921177767, 74.921596478, 79.916521271, 78.918337087,
85.910610729, 84.911789737, 87.905612124, 88.905848295, 89.904704416,
92.906378058, 97.905408169, 98.906254747, 101.904349312, 102.905504292,
105.903485715, 106.90509682, 113.90335854, 114.903878484, 119.902194676,
120.903815686, 129.906224399, 126.904472681, 131.904153457, 132.905451932,
137.905247237, 138.906353267, 139.905438706, 140.907652769, 141.907723297,
144.912749023, 151.919732425, 152.921230339, 157.924103912, 158.925346757,
163.929174751, 164.93032207, 165.930293061, 168.93421325, 173.938862089,
174.940771819, 179.946549953, 180.947995763, 183.950931188, 186.955753109,
191.96148069, 192.96292643, 194.964791134, 196.966568662, 201.970643011,
204.974427541, 207.976652071, 208.980398734, 208.982430435, 210.987496271,
222.017577738, 222.01755173, 228.031070292, 227.027752127, 232.038055325,
231.03588399, 238.050788247, 237.048173444, 242.058742611, 243.06138108,
247.07035354, 247.07030708, 251.079586788, 252.082978512, 257.095104724,
258.098431319, 255.093241131, 260.105504, 263.112547, 255.107398, 259.114500,
262.122892, 263.128558, 265.136151, 281.162061, 272.153615, 283.171792, 283.176451,
285.183698, 287.191186, 292.199786, 291.206564, 293.214670]
_temp_iso_symbol = [
"H", "H1", "H2", "D", "H3", "T", "H4", "H5", "H6", "H7", "HE", "HE3", "HE4",
"HE5", "HE6", "HE7", "HE8", "HE9", "HE10", "LI", "LI3", "LI4", "LI5", "LI6",
"LI7", "LI8", "LI9", "LI10", "LI11", "LI12", "BE", "BE5", "BE6", "BE7", "BE8",
"BE9", "BE10", "BE11", "BE12", "BE13", "BE14", "BE15", "BE16", "B", "B6", "B7",
"B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", "B16", "B17", "B18", "B19",
"C", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18",
"C19", "C20", "C21", "C22", "N", "N10", "N11", "N12", "N13", "N14", "N15", "N16",
"N17", "N18", "N19", "N20", "N21", "N22", "N23", "N24", "N25", "O", "O12", "O13",
"O14", "O15", "O16", "O17", "O18", "O19", "O20", "O21", "O22", "O23", "O24",
"O25", "O26", "O27", "O28", "F", "F14", "F15", "F16", "F17", "F18", "F19", "F20",
"F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31",
"NE", "NE16", "NE17", "NE18", "NE19", "NE20", "NE21", "NE22", "NE23", "NE24",
"NE25", "NE26", "NE27", "NE28", "NE29", "NE30", "NE31", "NE32", "NE33", "NE34",
"NA", "NA18", "NA19", "NA20", "NA21", "NA22", "NA23", "NA24", "NA25", "NA26",
"NA27", "NA28", "NA29", "NA30", "NA31", "NA32", "NA33", "NA34", "NA35", "NA36",
"NA37", "MG", "MG19", "MG20", "MG21", "MG22", "MG23", "MG24", "MG25", "MG26",
"MG27", "MG28", "MG29", "MG30", "MG31", "MG32", "MG33", "MG34", "MG35", "MG36",
"MG37", "MG38", "MG39", "MG40", "AL", "AL21", "AL22", "AL23", "AL24", "AL25",
"AL26", "AL27", "AL28", "AL29", "AL30", "AL31", "AL32", "AL33", "AL34", "AL35",
"AL36", "AL37", "AL38", "AL39", "AL40", "AL41", "AL42", "SI", "SI22", "SI23",
"SI24", "SI25", "SI26", "SI27", "SI28", "SI29", "SI30", "SI31", "SI32", "SI33",
"SI34", "SI35", "SI36", "SI37", "SI38", "SI39", "SI40", "SI41", "SI42", "SI43",
"SI44", "P", "P24", "P25", "P26", "P27", "P28", "P29", "P30", "P31", "P32",
"P33", "P34", "P35", "P36", "P37", "P38", "P39", "P40", "P41", "P42", "P43",
"P44", "P45", "P46", "S", "S26", "S27", "S28", "S29", "S30", "S31", "S32", "S33",
"S34", "S35", "S36", "S37", "S38", "S39", "S40", "S41", "S42", "S43", "S44",
"S45", "S46", "S47", "S48", "S49", "CL", "CL28", "CL29", "CL30", "CL31", "CL32",
"CL33", "CL34", "CL35", "CL36", "CL37", "CL38", "CL39", "CL40", "CL41", "CL42",
"CL43", "CL44", "CL45", "CL46", "CL47", "CL48", "CL49", "CL50", "CL51", "AR",
"AR30", "AR31", "AR32", "AR33", "AR34", "AR35", "AR36", "AR37", "AR38", "AR39",
"AR40", "AR41", "AR42", "AR43", "AR44", "AR45", "AR46", "AR47", "AR48", "AR49",
"AR50", "AR51", "AR52", "AR53", "K", "K32", "K33", "K34", "K35", "K36", "K37",
"K38", "K39", "K40", "K41", "K42", "K43", "K44", "K45", "K46", "K47", "K48",
"K49", "K50", "K51", "K52", "K53", "K54", "K55", "CA", "CA34", "CA35", "CA36",
"CA37", "CA38", "CA39", "CA40", "CA41", "CA42", "CA43", "CA44", "CA45", "CA46",
"CA47", "CA48", "CA49", "CA50", "CA51", "CA52", "CA53", "CA54", "CA55", "CA56",
"CA57", "SC", "SC36", "SC37", "SC38", "SC39", "SC40", "SC41", "SC42", "SC43",
"SC44", "SC45", "SC46", "SC47", "SC48", "SC49", "SC50", "SC51", "SC52", "SC53",
"SC54", "SC55", "SC56", "SC57", "SC58", "SC59", "SC60", "TI", "TI38", "TI39",
"TI40", "TI41", "TI42", "TI43", "TI44", "TI45", "TI46", "TI47", "TI48", "TI49",
"TI50", "TI51", "TI52", "TI53", "TI54", "TI55", "TI56", "TI57", "TI58", "TI59",
"TI60", "TI61", "TI62", "TI63", "V", "V40", "V41", "V42", "V43", "V44", "V45",
"V46", "V47", "V48", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56",
"V57", "V58", "V59", "V60", "V61", "V62", "V63", "V64", "V65", "CR", "CR42",
"CR43", "CR44", "CR45", "CR46", "CR47", "CR48", "CR49", "CR50", "CR51", "CR52",
"CR53", "CR54", "CR55", "CR56", "CR57", "CR58", "CR59", "CR60", "CR61", "CR62",
"CR63", "CR64", "CR65", "CR66", "CR67", "MN", "MN44", "MN45", "MN46", "MN47",
"MN48", "MN49", "MN50", "MN51", "MN52", "MN53", "MN54", "MN55", "MN56", "MN57",
"MN58", "MN59", "MN60", "MN61", "MN62", "MN63", "MN64", "MN65", "MN66", "MN67",
"MN68", "MN69", "FE", "FE45", "FE46", "FE47", "FE48", "FE49", "FE50", "FE51",
"FE52", "FE53", "FE54", "FE55", "FE56", "FE57", "FE58", "FE59", "FE60", "FE61",
"FE62", "FE63", "FE64", "FE65", "FE66", "FE67", "FE68", "FE69", "FE70", "FE71",
"FE72", "CO", "CO47", "CO48", "CO49", "CO50", "CO51", "CO52", "CO53", "CO54",
"CO55", "CO56", "CO57", "CO58", "CO59", "CO60", "CO61", "CO62", "CO63", "CO64",
"CO65", "CO66", "CO67", "CO68", "CO69", "CO70", "CO71", "CO72", "CO73", "CO74",
"CO75", "NI", "NI48", "NI49", "NI50", "NI51", "NI52", "NI53", "NI54", "NI55",
"NI56", "NI57", "NI58", "NI59", "NI60", "NI61", "NI62", "NI63", "NI64", "NI65",
"NI66", "NI67", "NI68", "NI69", "NI70", "NI71", "NI72", "NI73", "NI74", "NI75",
"NI76", "NI77", "NI78", "CU", "CU52", "CU53", "CU54", "CU55", "CU56", "CU57",
"CU58", "CU59", "CU60", "CU61", "CU62", "CU63", "CU64", "CU65", "CU66", "CU67",
"CU68", "CU69", "CU70", "CU71", "CU72", "CU73", "CU74", "CU75", "CU76", "CU77",
"CU78", "CU79", "CU80", "ZN", "ZN54", "ZN55", "ZN56", "ZN57", "ZN58", "ZN59",
"ZN60", "ZN61", "ZN62", "ZN63", "ZN64", "ZN65", "ZN66", "ZN67", "ZN68", "ZN69",
"ZN70", "ZN71", "ZN72", "ZN73", "ZN74", "ZN75", "ZN76", "ZN77", "ZN78", "ZN79",
"ZN80", "ZN81", "ZN82", "ZN83", "GA", "GA56", "GA57", "GA58", "GA59", "GA60",
"GA61", "GA62", "GA63", "GA64", "GA65", "GA66", "GA67", "GA68", "GA69", "GA70",
"GA71", "GA72", "GA73", "GA74", "GA75", "GA76", "GA77", "GA78", "GA79", "GA80",
"GA81", "GA82", "GA83", "GA84", "GA85", "GA86", "GE", "GE58", "GE59", "GE60",
"GE61", "GE62", "GE63", "GE64", "GE65", "GE66", "GE67", "GE68", "GE69", "GE70",
"GE71", "GE72", "GE73", "GE74", "GE75", "GE76", "GE77", "GE78", "GE79", "GE80",
"GE81", "GE82", "GE83", "GE84", "GE85", "GE86", "GE87", "GE88", "GE89", "AS",
"AS60", "AS61", "AS62", "AS63", "AS64", "AS65", "AS66", "AS67", "AS68", "AS69",
"AS70", "AS71", "AS72", "AS73", "AS74", "AS75", "AS76", "AS77", "AS78", "AS79",
"AS80", "AS81", "AS82", "AS83", "AS84", "AS85", "AS86", "AS87", "AS88", "AS89",
"AS90", "AS91", "AS92", "SE", "SE65", "SE66", "SE67", "SE68", "SE69", "SE70",
"SE71", "SE72", "SE73", "SE74", "SE75", "SE76", "SE77", "SE78", "SE79", "SE80",
"SE81", "SE82", "SE83", "SE84", "SE85", "SE86", "SE87", "SE88", "SE89", "SE90",
"SE91", "SE92", "SE93", "SE94", "BR", "BR67", "BR68", "BR69", "BR70", "BR71",
"BR72", "BR73", "BR74", "BR75", "BR76", "BR77", "BR78", "BR79", "BR80", "BR81",
"BR82", "BR83", "BR84", "BR85", "BR86", "BR87", "BR88", "BR89", "BR90", "BR91",
"BR92", "BR93", "BR94", "BR95", "BR96", "BR97", "KR", "KR69", "KR70", "KR71",
"KR72", "KR73", "KR74", "KR75", "KR76", "KR77", "KR78", "KR79", "KR80", "KR81",
"KR82", "KR83", "KR84", "KR85", "KR86", "KR87", "KR88", "KR89", "KR90", "KR91",
"KR92", "KR93", "KR94", "KR95", "KR96", "KR97", "KR98", "KR99", "KR100", "RB",
"RB71", "RB72", "RB73", "RB74", "RB75", "RB76", "RB77", "RB78", "RB79", "RB80",
"RB81", "RB82", "RB83", "RB84", "RB85", "RB86", "RB87", "RB88", "RB89", "RB90",
"RB91", "RB92", "RB93", "RB94", "RB95", "RB96", "RB97", "RB98", "RB99",
"RB100", "RB101", "RB102", "SR", "SR73", "SR74", "SR75", "SR76", "SR77",
"SR78", "SR79", "SR80", "SR81", "SR82", "SR83", "SR84", "SR85", "SR86", "SR87",
"SR88", "SR89", "SR90", "SR91", "SR92", "SR93", "SR94", "SR95", "SR96", "SR97",
"SR98", "SR99", "SR100", "SR101", "SR102", "SR103", "SR104", "SR105", "Y",
"Y76", "Y77", "Y78", "Y79", "Y80", "Y81", "Y82", "Y83", "Y84", "Y85", "Y86",
"Y87", "Y88", "Y89", "Y90", "Y91", "Y92", "Y93", "Y94", "Y95", "Y96", "Y97",
"Y98", "Y99", "Y100", "Y101", "Y102", "Y103", "Y104", "Y105", "Y106", "Y107",
"Y108", "ZR", "ZR78", "ZR79", "ZR80", "ZR81", "ZR82", "ZR83", "ZR84", "ZR85",
"ZR86", "ZR87", "ZR88", "ZR89", "ZR90", "ZR91", "ZR92", "ZR93", "ZR94", "ZR95",
"ZR96", "ZR97", "ZR98", "ZR99", "ZR100", "ZR101", "ZR102", "ZR103", "ZR104",
"ZR105", "ZR106", "ZR107", "ZR108", "ZR109", "ZR110", "NB", "NB81", "NB82",
"NB83", "NB84", "NB85", "NB86", "NB87", "NB88", "NB89", "NB90", "NB91", "NB92",
"NB93", "NB94", "NB95", "NB96", "NB97", "NB98", "NB99", "NB100", "NB101",
"NB102", "NB103", "NB104", "NB105", "NB106", "NB107", "NB108", "NB109",
"NB110", "NB111", "NB112", "NB113", "MO", "MO83", "MO84", "MO85", "MO86",
"MO87", "MO88", "MO89", "MO90", "MO91", "MO92", "MO93", "MO94", "MO95", "MO96",
"MO97", "MO98", "MO99", "MO100", "MO101", "MO102", "MO103", "MO104", "MO105",
"MO106", "MO107", "MO108", "MO109", "MO110", "MO111", "MO112", "MO113",
"MO114", "MO115", "TC", "TC85", "TC86", "TC87", "TC88", "TC89", "TC90", "TC91",
"TC92", "TC93", "TC94", "TC95", "TC96", "TC97", "TC98", "TC99", "TC100",
"TC101", "TC102", "TC103", "TC104", "TC105", "TC106", "TC107", "TC108",
"TC109", "TC110", "TC111", "TC112", "TC113", "TC114", "TC115", "TC116",
"TC117", "TC118", "RU", "RU87", "RU88", "RU89", "RU90", "RU91", "RU92", "RU93",
"RU94", "RU95", "RU96", "RU97", "RU98", "RU99", "RU100", "RU101", "RU102",
"RU103", "RU104", "RU105", "RU106", "RU107", "RU108", "RU109", "RU110",
"RU111", "RU112", "RU113", "RU114", "RU115", "RU116", "RU117", "RU118",
"RU119", "RU120", "RH", "RH89", "RH90", "RH91", "RH92", "RH93", "RH94", "RH95",
"RH96", "RH97", "RH98", "RH99", "RH100", "RH101", "RH102", "RH103", "RH104",
"RH105", "RH106", "RH107", "RH108", "RH109", "RH110", "RH111", "RH112",
"RH113", "RH114", "RH115", "RH116", "RH117", "RH118", "RH119", "RH120",
"RH121", "RH122", "PD", "PD91", "PD92", "PD93", "PD94", "PD95", "PD96", "PD97",
"PD98", "PD99", "PD100", "PD101", "PD102", "PD103", "PD104", "PD105", "PD106",
"PD107", "PD108", "PD109", "PD110", "PD111", "PD112", "PD113", "PD114",
"PD115", "PD116", "PD117", "PD118", "PD119", "PD120", "PD121", "PD122",
"PD123", "PD124", "AG", "AG93", "AG94", "AG95", "AG96", "AG97", "AG98", "AG99",
"AG100", "AG101", "AG102", "AG103", "AG104", "AG105", "AG106", "AG107",
"AG108", "AG109", "AG110", "AG111", "AG112", "AG113", "AG114", "AG115",
"AG116", "AG117", "AG118", "AG119", "AG120", "AG121", "AG122", "AG123",
"AG124", "AG125", "AG126", "AG127", "AG128", "AG129", "AG130", "CD", "CD95",
"CD96", "CD97", "CD98", "CD99", "CD100", "CD101", "CD102", "CD103", "CD104",
"CD105", "CD106", "CD107", "CD108", "CD109", "CD110", "CD111", "CD112",
"CD113", "CD114", "CD115", "CD116", "CD117", "CD118", "CD119", "CD120",
"CD121", "CD122", "CD123", "CD124", "CD125", "CD126", "CD127", "CD128",
"CD129", "CD130", "CD131", "CD132", "IN", "IN97", "IN98", "IN99", "IN100",
"IN101", "IN102", "IN103", "IN104", "IN105", "IN106", "IN107", "IN108",
"IN109", "IN110", "IN111", "IN112", "IN113", "IN114", "IN115", "IN116",
"IN117", "IN118", "IN119", "IN120", "IN121", "IN122", "IN123", "IN124",
"IN125", "IN126", "IN127", "IN128", "IN129", "IN130", "IN131", "IN132",
"IN133", "IN134", "IN135", "SN", "SN99", "SN100", "SN101", "SN102", "SN103",
"SN104", "SN105", "SN106", "SN107", "SN108", "SN109", "SN110", "SN111",
"SN112", "SN113", "SN114", "SN115", "SN116", "SN117", "SN118", "SN119",
"SN120", "SN121", "SN122", "SN123", "SN124", "SN125", "SN126", "SN127",
"SN128", "SN129", "SN130", "SN131", "SN132", "SN133", "SN134", "SN135",
"SN136", "SN137", "SB", "SB103", "SB104", "SB105", "SB106", "SB107", "SB108",
"SB109", "SB110", "SB111", "SB112", "SB113", "SB114", "SB115", "SB116",
"SB117", "SB118", "SB119", "SB120", "SB121", "SB122", "SB123", "SB124",
"SB125", "SB126", "SB127", "SB128", "SB129", "SB130", "SB131", "SB132",
"SB133", "SB134", "SB135", "SB136", "SB137", "SB138", "SB139", "TE", "TE105",
"TE106", "TE107", "TE108", "TE109", "TE110", "TE111", "TE112", "TE113",
"TE114", "TE115", "TE116", "TE117", "TE118", "TE119", "TE120", "TE121",
"TE122", "TE123", "TE124", "TE125", "TE126", "TE127", "TE128", "TE129",
"TE130", "TE131", "TE132", "TE133", "TE134", "TE135", "TE136", "TE137",
"TE138", "TE139", "TE140", "TE141", "TE142", "I", "I108", "I109", "I110",
"I111", "I112", "I113", "I114", "I115", "I116", "I117", "I118", "I119", "I120",
"I121", "I122", "I123", "I124", "I125", "I126", "I127", "I128", "I129", "I130",
"I131", "I132", "I133", "I134", "I135", "I136", "I137", "I138", "I139", "I140",
"I141", "I142", "I143", "I144", "XE", "XE110", "XE111", "XE112", "XE113",
"XE114", "XE115", "XE116", "XE117", "XE118", "XE119", "XE120", "XE121",
"XE122", "XE123", "XE124", "XE125", "XE126", "XE127", "XE128", "XE129",
"XE130", "XE131", "XE132", "XE133", "XE134", "XE135", "XE136", "XE137",
"XE138", "XE139", "XE140", "XE141", "XE142", "XE143", "XE144", "XE145",
"XE146", "XE147", "CS", "CS112", "CS113", "CS114", "CS115", "CS116", "CS117",
"CS118", "CS119", "CS120", "CS121", "CS122", "CS123", "CS124", "CS125",
"CS126", "CS127", "CS128", "CS129", "CS130", "CS131", "CS132", "CS133",
"CS134", "CS135", "CS136", "CS137", "CS138", "CS139", "CS140", "CS141",
"CS142", "CS143", "CS144", "CS145", "CS146", "CS147", "CS148", "CS149",
"CS150", "CS151", "BA", "BA114", "BA115", "BA116", "BA117", "BA118", "BA119",
"BA120", "BA121", "BA122", "BA123", "BA124", "BA125", "BA126", "BA127",
"BA128", "BA129", "BA130", "BA131", "BA132", "BA133", "BA134", "BA135",
"BA136", "BA137", "BA138", "BA139", "BA140", "BA141", "BA142", "BA143",
"BA144", "BA145", "BA146", "BA147", "BA148", "BA149", "BA150", "BA151",
"BA152", "BA153", "LA", "LA117", "LA118", "LA119", "LA120", "LA121", "LA122",
"LA123", "LA124", "LA125", "LA126", "LA127", "LA128", "LA129", "LA130",
"LA131", "LA132", "LA133", "LA134", "LA135", "LA136", "LA137", "LA138",
"LA139", "LA140", "LA141", "LA142", "LA143", "LA144", "LA145", "LA146",
"LA147", "LA148", "LA149", "LA150", "LA151", "LA152", "LA153", "LA154",
"LA155", "CE", "CE119", "CE120", "CE121", "CE122", "CE123", "CE124", "CE125",
"CE126", "CE127", "CE128", "CE129", "CE130", "CE131", "CE132", "CE133",
"CE134", "CE135", "CE136", "CE137", "CE138", "CE139", "CE140", "CE141",
"CE142", "CE143", "CE144", "CE145", "CE146", "CE147", "CE148", "CE149",
"CE150", "CE151", "CE152", "CE153", "CE154", "CE155", "CE156", "CE157", "PR",
"PR121", "PR122", "PR123", "PR124", "PR125", "PR126", "PR127", "PR128",
"PR129", "PR130", "PR131", "PR132", "PR133", "PR134", "PR135", "PR136",
"PR137", "PR138", "PR139", "PR140", "PR141", "PR142", "PR143", "PR144",
"PR145", "PR146", "PR147", "PR148", "PR149", "PR150", "PR151", "PR152",
"PR153", "PR154", "PR155", "PR156", "PR157", "PR158", "PR159", "ND", "ND124",
"ND125", "ND126", "ND127", "ND128", "ND129", "ND130", "ND131", "ND132",
"ND133", "ND134", "ND135", "ND136", "ND137", "ND138", "ND139", "ND140",
"ND141", "ND142", "ND143", "ND144", "ND145", "ND146", "ND147", "ND148",
"ND149", "ND150", "ND151", "ND152", "ND153", "ND154", "ND155", "ND156",
"ND157", "ND158", "ND159", "ND160", "ND161", "PM", "PM126", "PM127", "PM128",
"PM129", "PM130", "PM131", "PM132", "PM133", "PM134", "PM135", "PM136",
"PM137", "PM138", "PM139", "PM140", "PM141", "PM142", "PM143", "PM144",
"PM145", "PM146", "PM147", "PM148", "PM149", "PM150", "PM151", "PM152",
"PM153", "PM154", "PM155", "PM156", "PM157", "PM158", "PM159", "PM160",
"PM161", "PM162", "PM163", "SM", "SM128", "SM129", "SM130", "SM131", "SM132",
"SM133", "SM134", "SM135", "SM136", "SM137", "SM138", "SM139", "SM140",
"SM141", "SM142", "SM143", "SM144", "SM145", "SM146", "SM147", "SM148",
"SM149", "SM150", "SM151", "SM152", "SM153", "SM154", "SM155", "SM156",
"SM157", "SM158", "SM159", "SM160", "SM161", "SM162", "SM163", "SM164",
"SM165", "EU", "EU130", "EU131", "EU132", "EU133", "EU134", "EU135", "EU136",
"EU137", "EU138", "EU139", "EU140", "EU141", "EU142", "EU143", "EU144",
"EU145", "EU146", "EU147", "EU148", "EU149", "EU150", "EU151", "EU152",
"EU153", "EU154", "EU155", "EU156", "EU157", "EU158", "EU159", "EU160",
"EU161", "EU162", "EU163", "EU164", "EU165", "EU166", "EU167", "GD", "GD134",
"GD135", "GD136", "GD137", "GD138", "GD139", "GD140", "GD141", "GD142",
"GD143", "GD144", "GD145", "GD146", "GD147", "GD148", "GD149", "GD150",
"GD151", "GD152", "GD153", "GD154", "GD155", "GD156", "GD157", "GD158",
"GD159", "GD160", "GD161", "GD162", "GD163", "GD164", "GD165", "GD166",
"GD167", "GD168", "GD169", "TB", "TB136", "TB137", "TB138", "TB139", "TB140",
"TB141", "TB142", "TB143", "TB144", "TB145", "TB146", "TB147", "TB148",
"TB149", "TB150", "TB151", "TB152", "TB153", "TB154", "TB155", "TB156",
"TB157", "TB158", "TB159", "TB160", "TB161", "TB162", "TB163", "TB164",
"TB165", "TB166", "TB167", "TB168", "TB169", "TB170", "TB171", "DY", "DY138",
"DY139", "DY140", "DY141", "DY142", "DY143", "DY144", "DY145", "DY146",
"DY147", "DY148", "DY149", "DY150", "DY151", "DY152", "DY153", "DY154",
"DY155", "DY156", "DY157", "DY158", "DY159", "DY160", "DY161", "DY162",
"DY163", "DY164", "DY165", "DY166", "DY167", "DY168", "DY169", "DY170",
"DY171", "DY172", "DY173", "HO", "HO140", "HO141", "HO142", "HO143", "HO144",
"HO145", "HO146", "HO147", "HO148", "HO149", "HO150", "HO151", "HO152",
"HO153", "HO154", "HO155", "HO156", "HO157", "HO158", "HO159", "HO160",
"HO161", "HO162", "HO163", "HO164", "HO165", "HO166", "HO167", "HO168",
"HO169", "HO170", "HO171", "HO172", "HO173", "HO174", "HO175", "ER", "ER143",
"ER144", "ER145", "ER146", "ER147", "ER148", "ER149", "ER150", "ER151",
"ER152", "ER153", "ER154", "ER155", "ER156", "ER157", "ER158", "ER159",
"ER160", "ER161", "ER162", "ER163", "ER164", "ER165", "ER166", "ER167",
"ER168", "ER169", "ER170", "ER171", "ER172", "ER173", "ER174", "ER175",
"ER176", "ER177", "TM", "TM145", "TM146", "TM147", "TM148", "TM149", "TM150",
"TM151", "TM152", "TM153", "TM154", "TM155", "TM156", "TM157", "TM158",
"TM159", "TM160", "TM161", "TM162", "TM163", "TM164", "TM165", "TM166",
"TM167", "TM168", "TM169", "TM170", "TM171", "TM172", "TM173", "TM174",
"TM175", "TM176", "TM177", "TM178", "TM179", "YB", "YB148", "YB149", "YB150",
"YB151", "YB152", "YB153", "YB154", "YB155", "YB156", "YB157", "YB158",
"YB159", "YB160", "YB161", "YB162", "YB163", "YB164", "YB165", "YB166",
"YB167", "YB168", "YB169", "YB170", "YB171", "YB172", "YB173", "YB174",
"YB175", "YB176", "YB177", "YB178", "YB179", "YB180", "YB181", "LU", "LU150",
"LU151", "LU152", "LU153", "LU154", "LU155", "LU156", "LU157", "LU158",
"LU159", "LU160", "LU161", "LU162", "LU163", "LU164", "LU165", "LU166",
"LU167", "LU168", "LU169", "LU170", "LU171", "LU172", "LU173", "LU174",
"LU175", "LU176", "LU177", "LU178", "LU179", "LU180", "LU181", "LU182",
"LU183", "LU184", "HF", "HF153", "HF154", "HF155", "HF156", "HF157", "HF158",
"HF159", "HF160", "HF161", "HF162", "HF163", "HF164", "HF165", "HF166",
"HF167", "HF168", "HF169", "HF170", "HF171", "HF172", "HF173", "HF174",
"HF175", "HF176", "HF177", "HF178", "HF179", "HF180", "HF181", "HF182",
"HF183", "HF184", "HF185", "HF186", "HF187", "HF188", "TA", "TA155", "TA156",
"TA157", "TA158", "TA159", "TA160", "TA161", "TA162", "TA163", "TA164",
"TA165", "TA166", "TA167", "TA168", "TA169", "TA170", "TA171", "TA172",
"TA173", "TA174", "TA175", "TA176", "TA177", "TA178", "TA179", "TA180",
"TA181", "TA182", "TA183", "TA184", "TA185", "TA186", "TA187", "TA188",
"TA189", "TA190", "W", "W158", "W159", "W160", "W161", "W162", "W163", "W164",
"W165", "W166", "W167", "W168", "W169", "W170", "W171", "W172", "W173", "W174",
"W175", "W176", "W177", "W178", "W179", "W180", "W181", "W182", "W183", "W184",
"W185", "W186", "W187", "W188", "W189", "W190", "W191", "W192", "RE", "RE160",
"RE161", "RE162", "RE163", "RE164", "RE165", "RE166", "RE167", "RE168",
"RE169", "RE170", "RE171", "RE172", "RE173", "RE174", "RE175", "RE176",
"RE177", "RE178", "RE179", "RE180", "RE181", "RE182", "RE183", "RE184",
"RE185", "RE186", "RE187", "RE188", "RE189", "RE190", "RE191", "RE192",
"RE193", "RE194", "OS", "OS162", "OS163", "OS164", "OS165", "OS166", "OS167",
"OS168", "OS169", "OS170", "OS171", "OS172", "OS173", "OS174", "OS175",
"OS176", "OS177", "OS178", "OS179", "OS180", "OS181", "OS182", "OS183",
"OS184", "OS185", "OS186", "OS187", "OS188", "OS189", "OS190", "OS191",
"OS192", "OS193", "OS194", "OS195", "OS196", "IR", "IR164", "IR165", "IR166",
"IR167", "IR168", "IR169", "IR170", "IR171", "IR172", "IR173", "IR174",
"IR175", "IR176", "IR177", "IR178", "IR179", "IR180", "IR181", "IR182",
"IR183", "IR184", "IR185", "IR186", "IR187", "IR188", "IR189", "IR190",
"IR191", "IR192", "IR193", "IR194", "IR195", "IR196", "IR197", "IR198",
"IR199", "PT", "PT166", "PT167", "PT168", "PT169", "PT170", "PT171", "PT172",
"PT173", "PT174", "PT175", "PT176", "PT177", "PT178", "PT179", "PT180",
"PT181", "PT182", "PT183", "PT184", "PT185", "PT186", "PT187", "PT188",
"PT189", "PT190", "PT191", "PT192", "PT193", "PT194", "PT195", "PT196",
"PT197", "PT198", "PT199", "PT200", "PT201", "PT202", "AU", "AU169", "AU170",
"AU171", "AU172", "AU173", "AU174", "AU175", "AU176", "AU177", "AU178",
"AU179", "AU180", "AU181", "AU182", "AU183", "AU184", "AU185", "AU186",
"AU187", "AU188", "AU189", "AU190", "AU191", "AU192", "AU193", "AU194",
"AU195", "AU196", "AU197", "AU198", "AU199", "AU200", "AU201", "AU202",
"AU203", "AU204", "AU205", "HG", "HG171", "HG172", "HG173", "HG174", "HG175",
"HG176", "HG177", "HG178", "HG179", "HG180", "HG181", "HG182", "HG183",
"HG184", "HG185", "HG186", "HG187", "HG188", "HG189", "HG190", "HG191",
"HG192", "HG193", "HG194", "HG195", "HG196", "HG197", "HG198", "HG199",
"HG200", "HG201", "HG202", "HG203", "HG204", "HG205", "HG206", "HG207",
"HG208", "HG209", "HG210", "TL", "TL176", "TL177", "TL178", "TL179", "TL180",
"TL181", "TL182", "TL183", "TL184", "TL185", "TL186", "TL187", "TL188",
"TL189", "TL190", "TL191", "TL192", "TL193", "TL194", "TL195", "TL196",
"TL197", "TL198", "TL199", "TL200", "TL201", "TL202", "TL203", "TL204",
"TL205", "TL206", "TL207", "TL208", "TL209", "TL210", "TL211", "TL212", "PB",
"PB178", "PB179", "PB180", "PB181", "PB182", "PB183", "PB184", "PB185",
"PB186", "PB187", "PB188", "PB189", "PB190", "PB191", "PB192", "PB193",
"PB194", "PB195", "PB196", "PB197", "PB198", "PB199", "PB200", "PB201",
"PB202", "PB203", "PB204", "PB205", "PB206", "PB207", "PB208", "PB209",
"PB210", "PB211", "PB212", "PB213", "PB214", "PB215", "BI", "BI184", "BI185",
"BI186", "BI187", "BI188", "BI189", "BI190", "BI191", "BI192", "BI193",
"BI194", "BI195", "BI196", "BI197", "BI198", "BI199", "BI200", "BI201",
"BI202", "BI203", "BI204", "BI205", "BI206", "BI207", "BI208", "BI209",
"BI210", "BI211", "BI212", "BI213", "BI214", "BI215", "BI216", "BI217",
"BI218", "PO", "PO188", "PO189", "PO190", "PO191", "PO192", "PO193", "PO194",
"PO195", "PO196", "PO197", "PO198", "PO199", "PO200", "PO201", "PO202",
"PO203", "PO204", "PO205", "PO206", "PO207", "PO208", "PO209", "PO210",
"PO211", "PO212", "PO213", "PO214", "PO215", "PO216", "PO217", "PO218",
"PO219", "PO220", "AT", "AT193", "AT194", "AT195", "AT196", "AT197", "AT198",
"AT199", "AT200", "AT201", "AT202", "AT203", "AT204", "AT205", "AT206",
"AT207", "AT208", "AT209", "AT210", "AT211", "AT212", "AT213", "AT214",
"AT215", "AT216", "AT217", "AT218", "AT219", "AT220", "AT221", "AT222",
"AT223", "RN", "RN195", "RN196", "RN197", "RN198", "RN199", "RN200", "RN201",
"RN202", "RN203", "RN204", "RN205", "RN206", "RN207", "RN208", "RN209",
"RN210", "RN211", "RN212", "RN213", "RN214", "RN215", "RN216", "RN217",
"RN218", "RN219", "RN220", "RN221", "RN222", "RN223", "RN224", "RN225",
"RN226", "RN227", "RN228", "FR", "FR199", "FR200", "FR201", "FR202", "FR203",
"FR204", "FR205", "FR206", "FR207", "FR208", "FR209", "FR210", "FR211",
"FR212", "FR213", "FR214", "FR215", "FR216", "FR217", "FR218", "FR219",
"FR220", "FR221", "FR222", "FR223", "FR224", "FR225", "FR226", "FR227",
"FR228", "FR229", "FR230", "FR231", "FR232", "RA", "RA202", "RA203", "RA204",
"RA205", "RA206", "RA207", "RA208", "RA209", "RA210", "RA211", "RA212",
"RA213", "RA214", "RA215", "RA216", "RA217", "RA218", "RA219", "RA220",
"RA221", "RA222", "RA223", "RA224", "RA225", "RA226", "RA227", "RA228",
"RA229", "RA230", "RA231", "RA232", "RA233", "RA234", "AC", "AC206", "AC207",
"AC208", "AC209", "AC210", "AC211", "AC212", "AC213", "AC214", "AC215",
"AC216", "AC217", "AC218", "AC219", "AC220", "AC221", "AC222", "AC223",
"AC224", "AC225", "AC226", "AC227", "AC228", "AC229", "AC230", "AC231",
"AC232", "AC233", "AC234", "AC235", "AC236", "TH", "TH209", "TH210", "TH211",
"TH212", "TH213", "TH214", "TH215", "TH216", "TH217", "TH218", "TH219",
"TH220", "TH221", "TH222", "TH223", "TH224", "TH225", "TH226", "TH227",
"TH228", "TH229", "TH230", "TH231", "TH232", "TH233", "TH234", "TH235",
"TH236", "TH237", "TH238", "PA", "PA212", "PA213", "PA214", "PA215", "PA216",
"PA217", "PA218", "PA219", "PA220", "PA221", "PA222", "PA223", "PA224",
"PA225", "PA226", "PA227", "PA228", "PA229", "PA230", "PA231", "PA232",
"PA233", "PA234", "PA235", "PA236", "PA237", "PA238", "PA239", "PA240", "U",
"U217", "U218", "U219", "U220", "U221", "U222", "U223", "U224", "U225", "U226",
"U227", "U228", "U229", "U230", "U231", "U232", "U233", "U234", "U235", "U236",
"U237", "U238", "U239", "U240", "U241", "U242", "NP", "NP225", "NP226",
"NP227", "NP228", "NP229", "NP230", "NP231", "NP232", "NP233", "NP234",
"NP235", "NP236", "NP237", "NP238", "NP239", "NP240", "NP241", "NP242",
"NP243", "NP244", "PU", "PU228", "PU229", "PU230", "PU231", "PU232", "PU233",
"PU234", "PU235", "PU236", "PU237", "PU238", "PU239", "PU240", "PU241",
"PU242", "PU243", "PU244", "PU245", "PU246", "PU247", "AM", "AM231", "AM232",
"AM233", "AM234", "AM235", "AM236", "AM237", "AM238", "AM239", "AM240",
"AM241", "AM242", "AM243", "AM244", "AM245", "AM246", "AM247", "AM248",
"AM249", "CM", "CM233", "CM234", "CM235", "CM236", "CM237", "CM238", "CM239",
"CM240", "CM241", "CM242", "CM243", "CM244", "CM245", "CM246", "CM247",
"CM248", "CM249", "CM250", "CM251", "CM252", "BK", "BK235", "BK236", "BK237",
"BK238", "BK239", "BK240", "BK241", "BK242", "BK243", "BK244", "BK245",
"BK246", "BK247", "BK248", "BK249", "BK250", "BK251", "BK252", "BK253",
"BK254", "CF", "CF237", "CF238", "CF239", "CF240", "CF241", "CF242", "CF243",
"CF244", "CF245", "CF246", "CF247", "CF248", "CF249", "CF250", "CF251",
"CF252", "CF253", "CF254", "CF255", "CF256", "ES", "ES240", "ES241", "ES242",
"ES243", "ES244", "ES245", "ES246", "ES247", "ES248", "ES249", "ES250",
"ES251", "ES252", "ES253", "ES254", "ES255", "ES256", "ES257", "ES258", "FM",
"FM242", "FM243", "FM244", "FM245", "FM246", "FM247", "FM248", "FM249",
"FM250", "FM251", "FM252", "FM253", "FM254", "FM255", "FM256", "FM257",
"FM258", "FM259", "FM260", "MD", "MD245", "MD246", "MD247", "MD248", "MD249",
"MD250", "MD251", "MD252", "MD253", "MD254", "MD255", "MD256", "MD257",
"MD258", "MD259", "MD260", "MD261", "MD262", "NO", "NO248", "NO249", "NO250",
"NO251", "NO252", "NO253", "NO254", "NO255", "NO256", "NO257", "NO258",
"NO259", "NO260", "NO261", "NO262", "NO263", "NO264", "LR", "LR251", "LR252",
"LR253", "LR254", "LR255", "LR256", "LR257", "LR258", "LR259", "LR260",
"LR261", "LR262", "LR263", "LR264", "LR265", "LR266", "RF", "RF253", "RF254",
"RF255", "RF256", "RF257", "RF258", "RF259", "RF260", "RF261", "RF262",
"RF263", "RF264", "RF265", "RF266", "RF267", "RF268", "DB", "DB255", "DB256",
"DB257", "DB258", "DB259", "DB260", "DB261", "DB262", "DB263", "DB264",
"DB265", "DB266", "DB267", "DB268", "DB269", "DB270", "SG", "SG258", "SG259",
"SG260", "SG261", "SG262", "SG263", "SG264", "SG265", "SG266", "SG267",
"SG268", "SG269", "SG270", "SG271", "SG272", "SG273", "BH", "BH260", "BH261",
"BH262", "BH263", "BH264", "BH265", "BH266", "BH267", "BH268", "BH269",
"BH270", "BH271", "BH272", "BH273", "BH274", "BH275", "HS", "HS263", "HS264",
"HS265", "HS266", "HS267", "HS268", "HS269", "HS270", "HS271", "HS272",
"HS273", "HS274", "HS275", "HS276", "HS277", "MT", "MT265", "MT266", "MT267",
"MT268", "MT269", "MT270", "MT271", "MT272", "MT273", "MT274", "MT275",
"MT276", "MT277", "MT278", "MT279", "DS", "DS267", "DS268", "DS269", "DS270",
"DS271", "DS272", "DS273", "DS274", "DS275", "DS276", "DS277", "DS278",
"DS279", "DS280", "DS281", "RG", "RG272", "RG273", "RG274", "RG275", "RG276",
"RG277", "RG278", "RG279", "RG280", "RG281", "RG282", "RG283", "UUB",
"UUB277", "UUB278", "UUB279", "UUB280", "UUB281", "UUB282", "UUB283",
"UUB284", "UUB285", "UUT", "UUT283", "UUT284", "UUT285", "UUT286", "UUT287",
"UUQ", "UUQ285", "UUQ286", "UUQ287", "UUQ288", "UUQ289", "UUP", "UUP287",
"UUP288", "UUP289", "UUP290", "UUP291", "UUH", "UUH289", "UUH290", "UUH291",
"UUH292", "UUS", "UUS291", "UUS292", "UUO", "UUO293"]
_temp_iso_mass = [
1.00782503207, 1.00782503207, 2.01410177785, 2.01410177785, 3.01604927767,
3.01604927767, 4.027806424, 5.035311488, 6.044942594, 7.052749,
4.00260325415, 3.01602931914, 4.00260325415, 5.012223624, 6.018889124,
7.028020618, 8.033921897, 9.043950286, 10.052398837, 7.016004548, 3.030775,
4.027185558, 5.0125378, 6.015122794, 7.016004548, 8.022487362, 9.026789505,
10.035481259, 11.043797715, 12.053780, 9.012182201, 5.040790, 6.019726317,
7.016929828, 8.005305103, 9.012182201, 10.013533818, 11.021657749,
12.026920737, 13.035693007, 14.04289292, 15.053460, 16.061920, 11.009305406,
6.046810, 7.029917901, 8.024607233, 9.013328782, 10.012936992, 11.009305406,
12.014352104, 13.017780217, 14.025404009, 15.031103021, 16.039808829,
17.046989906, 18.056170, 19.063730, 12, 8.037675025, 9.031036689,
10.016853228, 11.011433613, 12, 13.00335483778, 14.0032419887, 15.010599256,
16.014701252, 17.022586116, 18.026759354, 19.034805018, 20.040319754,
21.049340, 22.057200, 14.00307400478, 10.041653674, 11.026090956,
12.018613197, 13.005738609, 14.00307400478, 15.00010889823, 16.006101658,
17.008450261, 18.014078959, 19.017028697, 20.023365807, 21.02710824,
22.034394934, 23.041220, 24.051040, 25.060660, 15.99491461956,
12.034404895, 13.024812213, 14.00859625, 15.003065617, 15.99491461956,
16.999131703, 17.999161001, 19.00358013, 20.004076742, 21.008655886,
22.009966947, 23.015687659, 24.020472917, 25.029460, 26.038340, 27.048260,
28.057810, 18.998403224, 14.035060, 15.018009103, 16.011465724,
17.002095237, 18.000937956, 18.998403224, 19.999981315, 20.999948951,
22.002998815, 23.003574631, 24.008115485, 25.012101747, 26.019615555,
27.026760086, 28.035670, 29.043260, 30.052500, 31.060429, 19.99244017542,
16.025761262, 17.017671504, 18.005708213, 19.001880248, 19.99244017542,
20.993846684, 21.991385113, 22.994466904, 23.993610779, 24.997736888,
26.000461206, 27.007589903, 28.012071575, 29.019385933, 30.024801045,
31.033110, 32.040020, 33.049380, 34.057028, 22.98976928087, 18.025969,
19.013877499, 20.007351328, 20.997655206, 21.994436425, 22.98976928087,
23.990962782, 24.989953968, 25.992633, 26.994076788, 27.998938, 29.002861,
30.008976, 31.013585452, 32.02046656, 33.026719756, 34.035170, 35.042493,
36.051480, 37.059340, 23.985041699, 19.03547, 20.018862545, 21.01171291,
21.999573843, 22.994123669, 23.985041699, 24.985836917, 25.982592929,
26.984340585, 27.983876825, 28.9886, 29.990434, 30.996546, 31.998975,
33.005254, 34.009456424, 35.017340, 36.023000, 37.031400, 38.037570,
39.046772, 40.053930, 26.981538627, 21.028040, 22.019520, 23.007267432,
23.999938865, 24.990428095, 25.986891692, 26.981538627, 27.981910306,
28.980445046, 29.982960256, 30.983946619, 31.988124489, 32.990843336,
33.996851837, 34.999860235, 36.006207204, 37.01067782, 38.017231021,
39.02297, 40.031450, 41.038330, 42.046890, 27.97692653246, 22.034530,
23.025520, 24.011545616, 25.004105574, 25.992329921, 26.986704905,
27.97692653246, 28.9764947, 29.973770171, 30.975363226999998,
31.974148082, 32.97800022, 33.978575524, 34.984583575, 35.986599477,
36.99293608, 37.995633601, 39.002070013, 40.005869121, 41.01456,
42.019790, 43.028660, 44.035260, 30.973761629, 24.034350, 25.020260,
26.011780, 26.999230236, 27.992314761, 28.981800606, 29.978313789,
30.973761629, 31.973907274, 32.971725543, 33.973636257, 34.973314117,
35.97825968, 36.979608946, 37.984156827, 38.986179475, 39.991296951,
40.994335435, 42.001007913, 43.00619, 44.012990, 45.019220, 46.027380,
31.972070999, 26.027880, 27.018833, 28.004372763, 28.996608049,
29.984903249, 30.979554728, 31.972070999, 32.971458759, 33.967866902,
34.969032161, 35.96708076, 36.971125567, 37.971163317, 38.975134306,
39.975451728, 40.979582149, 41.981022419, 42.98715479, 43.99021339,
44.996508112, 46.000750, 47.008590, 48.014170, 49.023619, 34.968852682,
28.028510, 29.014110, 30.004770, 30.992413086, 31.985689901, 32.977451887,
33.973762819, 34.968852682, 35.968306981, 36.965902591, 37.968010425,
38.968008164, 39.970415472, 40.970684525, 41.973254804, 42.974054403,
43.978281071, 44.980286886, 45.98421004, 46.988710, 47.994950, 49.000320,
50.007840, 51.014490, 39.96238312251, 30.021560, 31.012123, 31.997637984,
32.989925709, 33.980271244, 34.975257585, 35.967545105, 36.96677632,
37.962732394, 38.964313231, 39.96238312251, 40.964500611, 41.963045736,
42.965636056, 43.964924033, 44.968039956, 45.968094129, 46.972186792,
47.974540, 48.980520, 49.984430, 50.991630, 51.996780, 53.004940,
38.963706679, 32.021920, 33.007260, 33.998410, 34.988009692, 35.981292235,
36.973375889, 37.969081184, 38.963706679, 39.963998475, 40.961825762,
41.96240281, 42.96071554, 43.961556804, 44.960699493, 45.961976864,
46.961678473, 47.965513535, 48.967450928, 49.972783355, 50.976380,
51.982610, 52.987120, 53.994200, 54.999710, 39.962590983, 34.014120,
35.004940, 35.993087063, 36.985870269, 37.976318452, 38.970719725,
39.962590983, 40.962278062, 41.958618014, 42.958766628, 43.955481754,
44.956186566, 45.953692587, 46.954546006, 47.952534177, 48.955674148,
49.957518962, 50.961499214, 51.9651, 52.970050, 53.974350, 54.980550,
55.985570, 56.992356, 44.955911909, 36.014920, 37.003050, 37.994700,
38.984790002, 39.977967407, 40.969251125, 41.965516429, 42.961150658,
43.959402752, 44.955911909, 45.95517189, 46.952407508, 47.952231468,
48.950023975, 49.952187685, 50.953603368, 51.956675468, 52.959610,
53.963264561, 54.968243949, 55.972870, 56.977790, 57.983710, 58.989220,
59.995710, 47.947946281, 38.009770, 39.001610, 39.990498838, 40.983145,
41.973030902, 42.968522499, 43.959690069, 44.958125616, 45.952631555,
46.951763088, 47.947946281, 48.947869982, 49.944791194, 50.946614955,
51.946897311, 52.949727171, 53.951052401, 54.955265056, 55.958199639,
56.963989137, 57.966970, 58.972930, 59.976760, 60.983200, 61.987490,
62.994420, 50.943959507, 40.011090, 40.999780, 41.991230, 42.980650,
43.97411, 44.965775808, 45.960200481, 46.95490894, 47.952253707,
48.948516101, 49.947158485, 50.943959507, 51.944775479, 52.944337979,
53.946439854, 54.947233701, 55.950530966, 56.952561432, 57.956834136,
58.960207407, 59.965026862, 60.968480, 61.973780, 62.977550, 63.983470,
64.987920, 51.940507472, 42.006430, 42.997710, 43.985549, 44.97964,
45.968358635, 46.962900046, 47.954031716, 48.951335721, 49.946044205,
50.944767431, 51.940507472, 52.940649386, 53.938880395, 54.940839672,
55.940653139, 56.943613013, 57.944353129, 58.948586367, 59.950076033,
60.954717204, 61.95661319, 62.961860, 63.964410, 64.970160, 65.973380,
66.979550, 54.938045141, 44.006870, 44.994510, 45.986720, 46.976100,
47.96852, 48.959618005, 49.95423823, 50.948210787, 51.945565464,
52.941290117, 53.940358854, 54.938045141, 55.93890491, 56.938285378,
57.939981549, 58.940440237, 59.942911246, 60.944652638, 61.94842822,
62.95023999, 63.95424909, 64.956336065, 65.961080, 66.964140, 67.969300,
68.972840, 55.934937475, 45.014578, 46.000810, 46.992890, 47.980504,
48.973610, 49.962988982, 50.956819538, 51.948113875, 52.945307942,
53.939610501, 54.938293357, 55.934937475, 56.935393969, 57.933275558,
58.934875464, 59.934071683, 60.936745281, 61.936767442, 62.940369091,
63.941201265, 64.94538027, 65.946780638, 66.950947244, 67.9537, 68.958780,
69.961460, 70.966720, 71.969620, 58.933195048, 47.011490, 48.001760,
48.989720, 49.981540, 50.970720, 51.963590, 52.954218896, 53.948459635,
54.941999029, 55.939839278, 56.936291373, 57.935752814, 58.933195048,
59.933817059, 60.932475763, 61.934050563, 62.933611611, 63.935809908,
64.93647846, 65.939762004, 66.940889529, 67.944873058, 68.94632, 69.951,
70.9529, 71.957810, 72.960240, 73.965380, 74.968330, 57.935342907,
48.019750, 49.009660, 49.995930, 50.987720, 51.975680, 52.968470,
53.957905495, 54.951330251, 55.942132022, 56.939793526, 57.935342907,
58.934346705, 59.930786372, 60.931056033, 61.928345115, 62.929669374,
63.927965959, 64.930084304, 65.929139334, 66.931569414, 67.931868789,
68.935610269, 69.9365, 70.940736283, 71.942092682, 72.946470, 73.948070,
74.952870, 75.955330, 76.960550, 77.963180, 62.929597474, 51.997180,
52.985550, 53.976710, 54.966050, 55.958560, 56.949211078, 57.944538499,
58.939498028, 59.93736503, 60.933457821, 61.932583745, 62.929597474,
63.929764183, 64.927789485, 65.928868813, 66.927730314, 67.929610889,
68.929429269, 69.932392343, 70.932676833, 71.935820307, 72.936675282,
73.939874862, 74.9419, 75.945275026, 76.947850, 77.951960, 78.954560,
79.960870, 63.929142222, 53.992950, 54.983980, 55.972380, 56.964788,
57.954591555, 58.949263764, 59.941827035, 60.939510635, 61.934329764,
62.933211566, 63.929142222, 64.929240984, 65.926033419, 66.927127345,
67.924844154, 68.926550281, 69.925319274, 70.927721599, 71.926857951,
72.929779104, 73.929458609, 74.932936741, 75.93329357, 76.936958967,
77.938440216, 78.942652, 79.944342348, 80.950480, 81.954420, 82.961030,
68.925573587, 55.994910, 56.982930, 57.974250, 58.963370, 59.957060,
60.949446287, 61.944175238, 62.939294196, 63.936838747, 64.932734754,
65.93158901, 66.928201703, 67.927980084, 68.925573587, 69.926021972,
70.924701349, 71.926366268, 72.925174682, 73.926945762, 74.926500246,
75.928827626, 76.9291543, 77.93160818, 78.93289326, 79.936515781,
80.937752355, 81.942990, 82.946980, 83.952650, 84.957000, 85.963120,
73.921177767, 57.991010, 58.981750, 59.970190, 60.963790, 61.954650,
62.949640, 63.941653, 64.939436406, 65.933843453, 66.93273407,
67.92809424, 68.927964533, 69.924247381, 70.924950954, 71.922075815,
72.923458945, 73.921177767, 74.922858948, 75.921402557, 76.923548591,
77.922852739, 78.925400995, 79.925372392, 80.928820467, 81.929549725,
82.934620, 83.937470, 84.943030, 85.946490, 86.952510, 87.956910,
88.963830, 74.921596478, 59.993130, 60.980620, 61.973200, 62.963690,
63.957572, 64.949564, 65.94471, 66.939186071, 67.936769069, 68.932273675,
69.930924826, 70.927112428, 71.926752283, 72.923824844, 73.923928692,
74.921596478, 75.922394021, 76.920647286, 77.921827281, 78.920947934,
79.922533816, 80.922132287, 81.924504067, 82.924980024, 83.929058,
84.932020, 85.936500, 86.939900, 87.944940, 88.949390, 89.955500,
90.960430, 91.966800, 79.916521271, 64.964660, 65.955210, 66.950090,
67.941798, 68.939557817, 69.933390644, 70.932241822, 71.927112352,
72.926765345, 73.922476436, 74.922523368, 75.919213597, 76.919914038,
77.91730909, 78.918499098, 79.916521271, 80.917992474, 81.916699401,
82.919118473, 83.918462354, 84.922245053, 85.924271579, 86.928521358,
87.931423998, 88.936450, 89.939960, 90.945960, 91.949920, 92.956290,
93.960490, 78.918337087, 66.964790, 67.958516, 68.950106, 69.944792,
70.93874, 71.936644572, 72.931691524, 73.929891034, 74.925776207,
75.924541469, 76.921379082, 77.921145706, 78.918337087, 79.918529296,
80.916290563, 81.916804119, 82.915180421, 83.916478974, 84.915608403,
85.918797577, 86.920711324, 87.924065926, 88.926385334, 89.930627737,
90.933968095, 91.939258714, 92.943050, 93.948680, 94.952870, 95.958530,
96.962800, 85.910610729, 68.965180, 69.955259, 70.949625738, 71.942092038,
72.939289195, 73.933084369, 74.930945746, 75.925910078, 76.92467,
77.920364783, 78.920082431, 79.916378965, 80.916592015, 81.9134836,
82.914136099, 83.911506687, 84.912527331, 85.910610729, 86.913354862,
87.914446969, 88.917630581, 89.919516555, 90.923445215, 91.92615621,
92.931274357, 93.934360, 94.939840, 95.943070, 96.948560, 97.951910,
98.957600, 99.961140, 84.911789737, 70.965320, 71.959080, 72.950561,
73.944264751, 74.93857, 75.935072226, 76.930408, 77.928141, 78.92398946,
79.92251925, 80.918995913, 81.918208598, 82.915109701, 83.914384821,
84.911789737, 85.911167419, 86.909180526, 87.911315588, 88.912278016,
89.914801694, 90.916536958, 91.9197289, 92.922041876, 93.926404946,
94.929302889, 95.934272637, 96.937351916, 97.941790668, 98.945379283,
99.949870, 100.953196445, 101.958870, 87.905612124, 72.965970,
73.956310, 74.949949568, 75.941766782, 76.937944782, 77.93218,
78.929708, 79.924521013, 80.923211846, 81.918401639, 82.917556701,
83.913425275, 84.912932803, 85.909260204, 86.908877124, 87.905612124,
88.907450675, 89.907737888, 90.910203095, 91.911037858, 92.914025634,
93.915361312, 94.919358766, 95.921696802, 96.926152923, 97.928452934,
98.933240926, 99.935351911, 100.940517888, 101.943018987, 102.948950,
103.952330, 104.958580, 88.905848295, 75.958450, 76.949645, 77.943610,
78.937351634, 79.93428, 80.929127468, 81.926792451, 82.922354243,
83.920388264, 84.916433039, 85.914885576, 86.91087573, 87.909501146,
88.905848295, 89.907151886, 90.907304791, 91.908949143, 92.909582713,
93.911595245, 94.912820621, 95.915891343, 96.918133995, 97.92220302,
98.924636204, 99.927756586, 100.93031385, 101.933555695, 102.936730,
103.941050, 104.944870, 105.949790, 106.954140, 107.959480,
89.904704416, 77.955230, 78.949160, 79.9404, 80.937210026, 81.931087,
82.928653801, 83.923250, 84.921471182, 85.916473591, 86.914816252,
87.910226904, 88.9088895, 89.904704416, 90.905645767, 91.905040847,
92.906476006, 93.906315192, 94.9080426, 95.908273386, 96.910953109,
97.912734892, 98.916512106, 99.917761889, 100.921140415, 101.922981285,
102.926599606, 103.928780, 104.933050, 105.935910, 106.940750,
107.943960, 108.949240, 109.952870, 92.906378058, 80.949030,
81.943130, 82.936705382, 83.933570, 84.927912447, 85.925038326,
86.920361108, 87.918332163, 88.913418245, 89.911264845,
90.906996243, 91.907193888, 92.906378058, 93.907283888, 94.906835792,
95.908100647, 96.908098556, 97.910328412, 98.911618375, 99.914181619,
100.915252025, 101.918037614, 102.919143842, 103.922464701,
104.923936545, 105.927970, 106.930310, 107.934840, 108.937630,
109.942440, 110.945650, 111.950830, 112.954700, 97.905408169, 82.948740,
83.940090, 84.936550, 85.930695904, 86.927326502, 87.921953241,
88.919480009, 89.913936896, 90.911750194, 91.906810991, 92.90681261,
93.905088269, 94.905842129, 95.904679477, 96.906021465, 97.905408169,
98.90771187, 99.907477336, 100.910347001, 101.91029736, 102.913207142,
103.913763625, 104.91697461, 105.918136802, 106.921692604, 107.923453,
108.927810, 109.929730, 110.934410, 111.936840, 112.941880, 113.944920,
114.950290, 98.906254747, 84.948830, 85.942880, 86.936530, 87.932678,
88.927167, 89.923556564, 90.918427639, 91.915260166, 92.910248984,
93.909657002, 94.907657084, 95.907871383, 96.906365358, 97.907215966,
98.906254747, 99.90765778, 100.907314659, 101.909215019, 102.909181351,
103.911447454, 104.911660566, 105.914357927, 106.915079572, 107.918461226,
108.919982665, 109.923820483, 110.92569283, 111.929146493, 112.931590,
113.935880, 114.938690, 115.943370, 116.946480, 117.951480, 101.904349312,
86.949180, 87.940260, 88.936110, 89.929890, 90.926292, 91.920120,
92.917052034, 93.911359711, 94.910412929, 95.907597835, 96.9075547,
97.905287132, 98.905939302, 99.904219476, 100.905582087, 101.904349312,
102.906323847, 103.905432701, 104.907752866, 105.907329433,
106.909905089, 107.910173465, 108.913203233, 109.914136041, 110.917696,
111.918965, 112.922487194, 113.924281, 114.928686173, 115.930810,
116.935580, 117.937820, 118.942840, 119.945310, 102.905504292,
88.948837, 89.942870, 90.936550, 91.931980, 92.925740, 93.921698,
94.91589874, 95.914460631, 96.911336797, 97.910708158, 98.908132104,
99.90812155, 100.906163625, 101.906843196, 102.905504292, 103.906655518,
104.905693821, 105.907287135, 106.906748423, 107.908728018, 108.908737289,
109.911136411, 110.911585913, 111.914394159, 112.915530627, 113.918806,
114.920334, 115.924062, 116.925980, 117.930070, 118.932110, 119.936410,
120.938720, 121.943210, 105.903485715, 90.949110, 91.940420, 92.935910,
93.928770, 94.924690, 95.918164359, 96.916479073, 97.912720902,
98.911767833, 99.908505886, 100.908289242, 101.905608544, 102.906087307,
103.904035834, 104.90508492, 105.903485715, 106.905133481, 107.903891701,
108.905950451, 109.905153254, 110.907670734, 111.907314058, 112.910152908,
113.910362638, 114.913683824, 115.914158662, 116.917841338, 117.9189843,
118.923110, 119.924691878, 120.928870, 121.930550, 122.934930, 123.936880,
106.90509682, 92.949780, 93.942780, 94.935480, 95.930680, 96.923972412,
97.921566201, 98.917597178, 99.916104255, 100.912802233, 101.911685,
102.90897272, 103.908629157, 104.906528661, 105.906668921, 106.90509682,
107.905955556, 108.904752292, 109.906107231, 110.905291157, 111.907004814,
112.906566579, 113.908803704, 114.908762698, 115.911359933, 116.911684562,
117.914582768, 118.915665059, 119.918787384, 120.919848046, 121.923530,
122.924900, 123.928640, 124.930430, 125.934500, 126.936770, 127.941170,
128.943690, 129.950448, 113.90335854, 94.949870, 95.939770, 96.934940,
97.927395546, 98.925010, 99.920289525, 100.918681538, 101.914462258,
102.913419246, 103.909849475, 104.909467905, 105.90645941, 106.906617928,
107.904183683, 108.904982293, 109.90300207, 110.904178107, 111.902757809,
112.904401662, 113.90335854, 114.905430969, 115.904755809, 116.907218618,
117.90691453, 118.909921597, 119.909850129, 120.912977363, 121.913332432,
122.917002999, 123.917647616, 124.92124637, 125.922353321, 126.926443864,
127.927762285, 128.932150, 129.933901937, 130.940670, 131.945550,
114.903878484, 96.949540, 97.942140, 98.934220, 99.931110851,
100.926340, 101.924090238, 102.919914188, 103.918296171, 104.91467354,
105.913465411, 106.9102951, 107.90969818, 108.907150507, 109.907165274,
110.905103278, 111.905532331, 112.904057761, 113.904913876,
114.903878484, 115.905259703, 116.904513564, 117.906354367, 118.90584535,
119.907959608, 120.907845822, 121.91027601, 122.910438276, 123.913175231,
124.913600588, 125.916463857, 126.917353091, 127.920172328, 128.92169698,
129.924970049, 130.926851767, 131.93299026, 132.937810, 133.944150,
134.949330, 119.902194676, 98.949330, 99.939044343, 100.936060,
101.930295324, 102.928100, 103.923143223, 104.921349437, 105.91688062,
106.915644329, 107.911925378, 108.911283214, 109.907842791, 110.90773446,
111.904818207, 112.905170577, 113.902778869, 114.903342397, 115.90174053,
116.902951656, 117.901603167, 118.90330763, 119.902194676, 120.90423548,
121.903439046, 122.905720838, 123.905273946, 124.907784125, 125.90765328,
126.910360024, 127.910536624, 128.913479, 129.913967295, 130.916999769,
131.917815713, 132.923829249, 133.928291765, 134.934730, 135.939340,
136.945990, 120.903815686, 102.939690, 103.936472, 104.931486348,
105.928791, 106.924150, 107.922160, 108.918132426, 109.916753, 110.913163,
111.912398009, 112.909371672, 113.909269, 114.906598, 115.906793629,
116.904835941, 117.905528731, 118.903942009, 119.905072427, 120.903815686,
121.905173651, 122.90421397, 123.905935743, 124.905253818, 125.90724748,
126.906923609, 127.909169001, 128.909148442, 129.911656324, 130.911982275,
131.914466896, 132.91525163, 133.920379744, 134.925165771, 135.930350,
136.935310, 137.940790, 138.945980, 129.906224399, 104.943640,
105.937504237, 106.935006, 107.929444597, 108.927415515, 109.922407316,
110.921110692, 111.917013672, 112.915891, 113.912089, 114.911902,
115.90846, 116.908644719, 117.905827581, 118.906403645, 119.904020222,
120.904936424, 121.903043898, 122.904270029, 123.902817896, 124.904430731,
125.903311696, 126.905226336, 127.904463056, 128.906598238, 129.906224399,
130.908523864, 131.90855316, 132.910955306, 133.911368737, 134.916448592,
135.920101246, 136.925322954, 137.929220, 138.934730, 139.938850,
140.944650, 141.949080, 126.904472681, 107.943475, 108.938149417,
109.935242, 110.930276, 111.927970, 112.923640583, 113.921850, 114.918048,
115.916808633, 116.91365, 117.913074, 118.910074, 119.910048173,
120.907366811, 121.907589284, 122.905588965, 123.906209852, 124.904630164,
125.905624153, 126.904472681, 127.905809443, 128.904987722, 129.906674247,
130.906124609, 131.907997381, 132.907796939, 133.909744465, 134.910048121,
135.914653993, 136.91787084, 137.922349591, 138.926099478, 139.931000,
140.935030, 141.940180, 142.944560, 143.949990, 131.904153457, 109.944278068,
110.941602, 111.935623112, 112.933341174, 113.927980306, 114.92629392,
115.921581087, 116.920358735, 117.916178655, 118.915410688, 119.911784244,
120.911461829, 121.908367632, 122.90848191, 123.905893003, 124.906395464,
125.904273634, 126.905183723, 127.903531275, 128.904779435, 129.903508007,
130.905082362, 131.904153457, 132.905910722, 133.905394464, 134.907227495,
135.907218794, 136.911562125, 137.913954475, 138.918792936, 139.921640943,
140.926648049, 141.92970959, 142.935110, 143.938510, 144.944070, 145.947750,
146.953560, 132.905451932, 111.950301, 112.944493274, 113.941450, 114.935910,
115.933367, 116.928670701, 117.926559494, 118.922377304, 119.920677253,
120.917229209, 121.916113434, 122.912996036, 123.912257798, 124.90972827,
125.909451977, 126.907417525, 127.907748866, 128.906064426, 129.906708552,
130.905463926, 131.90643426, 132.905451932, 133.906718475, 134.905977008,
135.907311576, 136.907089473, 137.911016704, 138.913363999, 139.917282354,
140.920045752, 141.924298927, 142.92735175, 143.932076914, 144.93552617,
145.940289423, 146.944155008, 147.949218153, 148.952930, 149.958170,
150.962190, 137.905247237, 113.950675405, 114.947370, 115.941380,
116.938499, 117.933040, 118.930659661, 119.926044974, 120.924054499,
121.919904, 122.918781036, 123.915093603, 124.914472912, 125.911250177,
126.911093797, 127.908317698, 128.908679439, 129.906320811, 130.906941118,
131.905061288, 132.90600749, 133.904508383, 134.905688591, 135.904575945,
136.905827384, 137.905247237, 138.908841341, 139.910604505, 140.914411009,
141.91645341, 142.920626719, 143.922952853, 144.927627032, 145.930219572,
146.934945, 147.937720047, 148.942580, 149.945680, 150.950810, 151.954270,
152.959610, 138.906353267, 116.950068, 117.946730, 118.940990, 119.938070,
120.933010, 121.930710, 122.926240, 123.924574275, 124.920816034,
125.919512667, 126.916375448, 127.915585177, 128.912692815, 129.912368724,
130.91007, 131.910101145, 132.908218, 133.908514011, 134.906976844,
135.907635536, 136.906493598, 137.90711193, 138.906353267, 139.909477645,
140.910962152, 141.91407913, 142.91606272, 143.919599647, 144.921645401,
145.92579346, 146.928235284, 147.932228868, 148.934734, 149.938770,
150.941720, 151.946250, 152.949620, 153.954500, 154.958350, 139.905438706,
118.952760, 119.946640, 120.943420, 121.937910, 122.935400, 123.930410,
124.928440, 125.923971, 126.922731, 127.918911, 128.918102, 129.914736,
130.914422, 131.911460487, 132.91151502, 133.908924821, 134.909151396,
135.907172422, 136.907805577, 137.905991321, 138.906652651, 139.905438706,
140.90827627, 141.909244205, 142.91238591, 143.913647336, 144.917233135,
145.918759009, 146.922673954, 147.92443241, 148.928399883, 149.930408931,
150.933976196, 151.936540, 152.940580, 153.943420, 154.948040, 155.951260,
156.956340, 140.907652769, 120.955364, 121.951810, 122.945960, 123.942960,
124.937830, 125.935310, 126.930830, 127.928791, 128.925095, 129.92359,
130.920259, 131.919255, 132.916330532, 133.915711737, 134.913111745,
135.912691611, 136.910705455, 137.910754636, 138.908938399, 139.909075874,
140.907652769, 141.910044806, 142.910816926, 143.913305245, 144.9145117,
145.917644336, 146.918995992, 147.922135026, 148.923717651, 149.926672997,
150.928318618, 151.931499225, 152.933838905, 153.937518153, 154.940120,
155.944270, 156.947430, 157.951980, 158.955500, 141.907723297, 123.952230,
124.948880, 125.943220, 126.940500, 127.935390, 128.933188, 129.928506,
130.927247, 131.923321237, 132.922348, 133.918790181, 134.91818116,
135.914976035, 136.914567137, 137.911949961, 138.911978288, 139.909552,
140.909609854, 141.907723297, 142.90981429, 143.910087274, 144.912573636,
145.913116939, 146.916100441, 147.916893288, 148.920148842, 149.920890888,
150.923828929, 151.924682219, 152.927698232, 153.929477307, 154.932932,
155.935018114, 156.939030, 157.941600, 158.946090, 159.949090, 160.953880,
144.912749023, 125.957520, 126.951630, 127.948420, 128.943160, 129.940450,
130.935870, 131.933750, 132.929782, 133.928353, 134.924876, 135.923565829,
136.920479493, 137.919548281, 138.916804082, 139.916041789, 140.913555054,
141.912874471, 142.910932616, 143.912590843, 144.912749023, 145.914696305,
146.915138545, 147.917474618, 148.918334155, 149.920983561, 150.921206973,
151.923496795, 152.924116889, 153.926463943, 154.928101267, 155.931056736,
156.933039369, 157.936561407, 158.938970, 159.942990, 160.945860,
161.950290, 162.953680, 151.919732425, 127.958080, 128.954640, 129.948920,
130.946110, 131.940690, 132.938670, 133.933970, 134.93252, 135.928275527,
136.926971746, 137.923243961, 138.922296605, 139.918994687, 140.918476488,
141.915197641, 142.914628338, 143.911999478, 144.913410353, 145.9130409,
146.914897923, 147.914822674, 148.917184735, 149.917275539, 150.919932409,
151.919732425, 152.922097356, 153.922209273, 154.924640161, 155.925527887,
156.928358717, 157.929991317, 158.933211271, 159.935140, 160.938830,
161.941220, 162.945360, 163.948280, 164.952980, 152.921230339, 129.963569,
130.957753, 131.954370, 132.949240, 133.946510, 134.941820, 135.939600,
136.935570, 137.933709, 138.92979228, 139.928087607, 140.92493072,
141.923434945, 142.920297509, 143.918816823, 144.916265237, 145.917205817,
146.916746111, 147.918085895, 148.917931238, 149.919701819, 150.919850161,
151.921744534, 152.921230339, 153.922979237, 154.92289326, 155.924752249,
156.925423647, 157.927845302, 158.929088861, 159.931971, 160.933680,
161.937040, 162.939210, 163.942990, 164.945720, 165.949970, 166.953210,
157.924103912, 133.955370, 134.952570, 135.947340, 136.945020, 137.940120,
138.938240, 139.933674, 140.932126, 141.928116, 142.92674951, 143.922963,
144.921709252, 145.918310608, 146.91909442, 147.918114524, 148.919340915,
149.918658876, 150.920348482, 151.919790996, 152.921749543, 153.920865598,
154.922622022, 155.922122743, 156.923960135, 157.924103912, 158.926388658,
159.927054146, 160.929669211, 161.930984751, 162.933990, 163.935860,
164.939380, 165.941600, 166.945570, 167.948360, 168.952870, 158.925346757,
135.961380, 136.955980, 137.953160, 138.948290, 139.945805049, 140.941448,
141.938744, 142.935121, 143.933045, 144.929274, 145.927246584, 146.924044585,
147.924271701, 148.923245909, 149.923659686, 150.923102543, 151.924074438,
152.923434588, 153.924678019, 154.923505236, 155.924747213, 156.924024604,
157.925413137, 158.925346757, 159.927167606, 160.927569919, 161.929488234,
162.930647536, 163.933350838, 164.934880, 165.937991959, 166.940050,
167.943640, 168.946220, 169.950250, 170.953300, 163.929174751, 137.962490,
138.959540, 139.954010, 140.951350, 141.946366, 142.943830, 143.939254,
144.937425, 145.932845369, 146.9310915, 147.927149831, 148.927304787,
149.925585184, 150.926184601, 151.9247183, 152.92576467, 153.924424457,
154.925753775, 155.92428311, 156.925466095, 157.924409487, 158.925739214,
159.925197517, 160.926933364, 161.926798447, 162.928731159, 163.929174751,
164.931703333, 165.932806741, 166.935655462, 167.937128769, 168.940307614,
169.942390, 170.946200, 171.948760, 172.953000, 164.93032207, 139.968539,
140.963098, 141.959770, 142.954610, 143.951480, 144.947200, 145.944640,
146.940056, 147.937718, 148.933774771, 149.933496182, 150.931688142,
151.931713714, 152.930198789, 153.930601579, 154.929103491, 155.929839,
156.928256188, 157.928941007, 158.927711959, 159.928729478, 160.927854776,
161.929095504, 162.928733903, 163.930233507, 164.93032207, 165.932284162,
166.933132633, 167.935515708, 168.936872273, 169.939618929, 170.94146515,
171.944820, 172.947290, 173.951150, 174.954050, 165.930293061, 142.966340,
143.960380, 144.957390, 145.952000, 146.949490, 147.944550, 148.942306,
149.937913839, 150.937448903, 151.935050389, 152.935063492, 153.932783081,
154.933208949, 155.931064698, 156.931916, 157.929893474, 158.930684066,
159.929083292, 160.929995309, 161.928778264, 162.930032749, 163.929200229,
164.930726003, 165.930293061, 166.932048159, 167.932370224, 168.934590364,
169.935464312, 170.938029808, 171.939356113, 172.942400, 173.944230,
174.947770, 175.950080, 176.954050, 168.93421325, 144.970073, 145.966425,
146.960961, 147.957840, 148.952720, 149.949960, 150.94548349, 151.944422,
152.942012112, 153.941567808, 154.939199459, 155.938979933, 156.936973,
157.936979525, 158.934975, 159.935262801, 160.933549, 161.933994682,
162.932651124, 163.93356, 164.932435492, 165.933554131, 166.932851622,
167.934172776, 168.93421325, 169.935801397, 170.93642944, 171.938400044,
172.939603607, 173.942168605, 174.943836853, 175.946994685, 176.949040,
177.952640, 178.955340, 173.938862089, 147.967420, 148.964040, 149.958420,
150.955400769, 151.950288919, 152.949480, 153.946393928, 154.945782332,
155.942818215, 156.942627848, 157.939865617, 158.940050099, 159.937552344,
160.937901678, 161.93576821, 162.936334305, 163.934489416, 164.935279,
165.933882042, 166.934949605, 167.933896895, 168.935189802, 169.934761837,
170.936325799, 171.936381469, 172.938210787, 173.938862089, 174.94127645,
175.942571683, 176.945260822, 177.94664668, 178.950170, 179.952330,
180.956150, 174.940771819, 149.973228, 150.967577, 151.964120,
152.958767331, 153.957522, 154.954316216, 155.953032523, 156.9500983,
157.949313283, 158.946628776, 159.946033, 160.943572, 161.943277288,
162.941179, 163.941339, 164.939406724, 165.939859, 166.93827,
167.938739111, 168.937651439, 169.938474968, 170.937913136, 171.939085669,
172.938930602, 173.94033748, 174.940771819, 175.94268631, 176.943758055,
177.945954559, 178.947327443, 179.94988116, 180.951970, 181.955040,
182.957570, 183.960910, 179.946549953, 152.970690, 153.964860, 154.963390,
155.959364025, 156.958396, 157.954799366, 158.95399487, 159.950684379,
160.950274844, 161.947210498, 162.947089, 163.944367284, 164.944567,
165.94218, 166.9426, 167.940568, 168.941259, 169.939609, 170.940492,
171.939448301, 172.940513, 173.940046178, 174.941509181, 175.941408631,
176.943220651, 177.943698766, 178.945816145, 179.946549953, 180.949101246,
181.950554096, 182.953530439, 183.955446515, 184.958820, 185.960890,
186.964590, 187.966850, 180.947995763, 154.974592, 155.972303,
156.968192445, 157.966699, 158.963018173, 159.961486056, 160.958417,
161.957291859, 162.954330271, 163.953534, 164.950772514, 165.950512,
166.948093, 167.948047, 168.946011, 169.946175, 170.944476, 171.944895,
172.94375, 173.944454, 174.943737, 175.944857, 176.944472403,
177.945778221, 178.945929535, 179.947464831, 180.947995763, 181.950151849,
182.951372616, 183.954007966, 184.955559375, 185.958552023, 186.960530,
187.963700, 188.965830, 189.969230, 183.950931188, 157.974562, 158.972918,
159.968478805, 160.967357, 161.963497417, 162.962523542, 163.958954382,
164.958279949, 165.955027253, 166.954816014, 167.951808394, 168.95177879,
169.949228482, 170.949451, 171.947292, 172.947689, 173.946079, 174.946717,
175.945634, 176.946643, 177.945876236, 178.947070447, 179.946704459,
180.948197248, 181.948204156, 182.950222951, 183.950931188, 184.953419264,
185.954364127, 186.957160466, 187.958489105, 188.961912868, 189.963181378,
190.966600, 191.968170, 186.955753109, 159.982115, 160.977589119,
161.976002, 162.972080535, 163.970323, 164.967088557, 165.965808,
166.962601, 167.961572608, 168.958791096, 169.958220071, 170.955716,
171.955422961, 172.953243, 173.953115, 174.951381, 175.951623, 176.950328,
177.950989, 178.949987641, 179.950789084, 180.950067916, 181.95121008,
182.950819841, 183.952520756, 184.952954982, 185.954986084, 186.955753109,
187.958114438, 188.959229007, 189.961817977, 190.963125242, 191.965960,
192.967470, 193.970420, 191.96148069, 161.984431, 162.982690,
163.978035649, 164.976762, 165.972690753, 166.971547969, 167.967803678,
168.96701927, 169.963577028, 170.963184819, 171.960023303, 172.959808409,
173.957062202, 174.956945835, 175.954806, 176.954965324, 177.953251241,
178.953816017, 179.952378803, 180.953244, 181.952110186, 182.953126102,
183.952489071, 184.954042265, 185.953838158, 186.955750458, 187.955838228,
188.95814747, 189.958447048, 190.960929718, 191.96148069, 192.964151563,
193.965182083, 194.968126661, 195.969639333, 192.96292643, 163.992201,
164.987520, 165.985824, 166.981665156, 167.979881, 168.976294942, 169.974965,
170.971626042, 171.970456, 172.967501739, 173.966861045, 174.964112895,
175.963648688, 176.9613015, 177.961082, 178.959122266, 179.959229446,
180.957625297, 181.958076296, 182.956846458, 183.957476, 184.956698,
185.957946104, 186.957363361, 187.958853121, 188.958718935, 189.960545968,
190.960594046, 191.962605012, 192.96292643, 193.965078378, 194.965979573,
195.968396542, 196.969653285, 197.972280, 198.973804583, 194.964791134,
165.994855, 166.992979, 167.988150742, 168.986715, 169.982495289,
170.981244542, 171.977347128, 172.976444754, 173.972818767, 174.972420552,
175.968944622, 176.968469481, 177.965648724, 178.965363404, 179.963031477,
180.963097285, 181.961170656, 182.961596703, 183.959922251, 184.960619,
185.959350813, 186.960587, 187.959395391, 188.960833686, 189.959931655,
190.961676661, 191.961038005, 192.962987401, 193.962680253, 194.964791134,
195.964951521, 196.967340182, 197.96789279, 198.970593094, 199.971440677,
200.974512868, 201.975740, 196.966568662, 168.998080, 169.996122,
170.991878881, 171.990035, 172.98623738, 173.984761, 174.981274107,
175.980099, 176.976864908, 177.97603192, 178.973212812, 179.972521124,
180.970079048, 181.969617874, 182.967593034, 183.967451524, 184.965789411,
185.965952703, 186.964567541, 187.965323661, 188.963948286, 189.964700339,
190.963704225, 191.964812953, 192.964149715, 193.96536525, 194.96503464,
195.966569813, 196.966568662, 197.968242303, 198.968765193, 199.970725647,
200.97165724, 201.973805838, 202.975154542, 203.977724, 204.979870,
201.970643011, 171.003760, 171.998832686, 172.997242, 173.992863695,
174.99142327, 175.98735458, 176.986279158, 177.982483143, 178.981833861,
179.978266394, 180.977819311, 181.974689964, 182.974449841, 183.971713051,
184.971899086, 185.96936179, 186.969814236, 187.967577049, 188.968190034,
189.966322449, 190.967157105, 191.965634327, 192.966665421, 193.965439409,
194.966720113, 195.965832649, 196.967212908, 197.966769032, 198.968279932,
199.968326004, 200.970302268, 201.970643011, 202.972872484, 203.973493933,
204.976073386, 205.977514066, 206.982588545, 207.985940, 208.991040,
209.994510, 204.974427541, 176.000590, 176.996427286, 177.994897,
178.991089082, 179.989906, 180.986257447, 181.985667104, 182.982192802,
183.981873122, 184.978791305, 185.978325, 186.975905897, 187.976009782,
188.973588428, 189.973877149, 190.971786154, 191.972225, 192.970672,
193.9712, 194.969774335, 195.970481151, 196.969574511, 197.970483495,
198.969877, 199.970962672, 200.970818891, 201.972105808, 202.97234422,
203.973863522, 204.974427541, 205.97611032, 206.977419429, 207.9820187,
208.985358952, 209.990073689, 210.993477, 211.998228, 207.976652071,
178.003830191, 179.002150, 179.997918173, 180.996623958, 181.992671842,
182.991874629, 183.988142339, 184.987609944, 185.984238945, 186.98391837,
187.980874338, 188.980807, 189.978081517, 190.978265, 191.975785171,
192.976173234, 193.97401207, 194.97454205, 195.972774109, 196.973431124,
197.972033959, 198.97291665, 199.971826675, 200.972884511, 201.972159133,
202.973390521, 203.973043589, 204.974481755, 205.974465278, 206.975896887,
207.976652071, 208.98109012, 209.984188527, 210.988736964, 211.991897543,
212.996581499, 213.999805408, 215.004807, 208.980398734, 184.001124,
184.997625, 185.996597625, 186.993157835, 187.992265154, 188.989199012,
189.988295129, 190.985786119, 191.985457954, 192.982959771, 193.98283396,
194.980650737, 195.980666509, 196.978864454, 197.979206, 198.977671961,
199.978131829, 200.977009036, 201.977742324, 202.976876001, 203.977812736,
204.977389366, 205.97849913, 206.978470679, 207.979742196, 208.980398734,
209.984120371, 210.98726946, 211.991285724, 212.994384666, 213.998711539,
215.001769776, 216.006305943, 217.009470, 218.014316, 208.982430435,
187.999422048, 188.998480562, 189.995101185, 190.994574485, 191.991335149,
192.991025275, 193.988185606, 194.988110728, 195.98553458, 196.98565963,
197.983388616, 198.983666063, 199.981798604, 200.982259764, 201.980757541,
202.981420103, 203.980318121, 204.981203322, 205.980481099, 206.981593173,
207.981245702, 208.982430435, 209.982873673, 210.986653154, 211.988867969,
212.99285728, 213.99520135, 214.999419988, 216.001915035, 217.006334796,
218.008973037, 219.013744, 220.016602, 210.987496271, 192.999843112,
193.998725085, 194.996268098, 195.995788077, 196.993189215, 197.992837202,
198.990532254, 199.990351264, 200.988416999, 201.988630236, 202.986941984,
203.987251326, 204.986074483, 205.986667036, 206.985783502, 207.986589977,
208.986173143, 209.98714771, 210.987496271, 211.990744771, 212.992936646,
213.996371733, 214.99865257, 216.002423257, 217.004718822, 218.008694336,
219.011161691, 220.015407682, 221.018050, 222.022330, 223.025190,
222.017577738, 195.005437696, 196.002115223, 197.001584351, 197.998678663,
198.998370297, 199.9956993, 200.995628335, 201.993263492, 202.993386687,
203.99142874, 204.991718799, 205.990214104, 206.990734225, 207.98964247,
208.990414742, 209.989696216, 210.990600523, 211.990703529, 212.993882668,
213.995362554, 214.998745483, 216.00027437, 217.003927675, 218.005601256,
219.009480204, 220.011393981, 221.015536782, 222.017577738, 223.021790,
224.024090, 225.028440, 226.030890, 227.035407, 228.037986, 222.01755173,
199.007258147, 200.00657249, 201.003860867, 202.003372847, 203.000924647,
204.000653204, 204.99859396, 205.998666066, 206.996949414, 207.997138783,
208.995953555, 209.996407738, 210.995536544, 211.996202244, 212.996189081,
213.998971145, 215.000341497, 216.00319799, 217.004631951, 218.007578322,
219.009252149, 220.012327405, 221.014254762, 222.01755173, 223.019735857,
224.023249951, 225.025565414, 226.029386231, 227.031835938, 228.035729,
229.038450228, 230.042510, 231.045440, 232.049772, 228.031070292,
202.009890686, 203.009271619, 204.006499668, 205.00626857, 206.00382727,
207.003798105, 208.00183994, 209.001991373, 210.000494978, 211.000897987,
211.999794499, 213.000383959, 214.000107894, 215.002719834, 216.003533035,
217.006320327, 218.00714023, 219.010085078, 220.011028384, 221.013917338,
222.01537453, 223.018502171, 224.020211821, 225.023611564, 226.025409823,
227.029177842, 228.031070292, 229.034957577, 230.037056394, 231.041220,
232.043638, 233.048060, 234.050704, 227.027752127, 206.01450498,
207.011949748, 208.011551551, 209.009494863, 210.009435986, 211.007734835,
212.007813822, 213.006607643, 214.006901798, 215.006453625, 216.008720075,
217.009346914, 218.011641453, 219.012420389, 220.014762979, 221.015591248,
222.017843851, 223.019137468, 224.021722866, 225.023229585, 226.026098089,
227.027752127, 228.031021112, 229.033015243, 230.036294178, 231.038558786,
232.042027438, 233.044550, 234.048420, 235.051232, 236.055296,
232.038055325, 209.017715682, 210.015075342, 211.014928413, 212.012980288,
213.01301014, 214.01149977, 215.01173033, 216.011062115, 217.013114328,
218.013284499, 219.015536895, 220.015747762, 221.018183674, 222.018468121,
223.020811448, 224.021466895, 225.023951021, 226.024903069, 227.02770407,
228.028741127, 229.03176243, 230.033133843, 231.036304343, 232.038055325,
233.041581843, 234.04360123, 235.047510074, 236.049870, 237.053894,
238.056496, 231.03588399, 212.023204138, 213.02110934, 214.020918417,
215.019185865, 216.019109564, 217.018323986, 218.020041889, 219.019883143,
220.021875303, 221.021877983, 222.023742, 223.023962273, 224.025625738,
225.026130678, 226.027947753, 227.028805072, 228.031051376, 229.032096793,
230.034540754, 231.03588399, 232.038591592, 233.040247277, 234.043308058,
235.045443615, 236.048681284, 237.051145659, 238.05450271, 239.057260,
240.060980, 238.050788247, 217.024368791, 218.023535671, 219.02491916,
220.024723, 221.026399, 222.026086, 223.0277386, 224.027604778,
225.029390717, 226.029338702, 227.031156367, 228.031374006, 229.033505939,
230.033939784, 231.036293704, 232.037156152, 233.039635207, 234.040952088,
235.043929918, 236.045568006, 237.048730184, 238.050788247, 239.054293299,
240.056591988, 241.060330, 242.062931, 237.048173444, 225.033913933,
226.035145, 227.034956789, 228.036180, 229.036263808, 230.037827597,
231.038245085, 232.040108, 233.040740546, 234.042895038, 235.044063267,
236.0465696, 237.048173444, 238.050946405, 239.052939025, 240.056162182,
241.058252431, 242.06164118, 243.064279, 244.067850, 242.058742611,
228.038742328, 229.040150212, 230.039649886, 231.041101107, 232.041187097,
233.042997375, 234.043317076, 235.04528605, 236.046057964, 237.048409658,
238.049559894, 239.052163381, 240.053813545, 241.056851456, 242.058742611,
243.062003092, 244.064203907, 245.067747154, 246.070204627, 247.074070,
243.06138108, 231.045560, 232.046590, 233.046348, 234.047809, 235.047946,
236.049579, 237.049996, 238.051984324, 239.053024479, 240.055300179,
241.056829144, 242.059549159, 243.06138108, 244.064284847, 245.066452114,
246.069774619, 247.072093, 248.075752, 249.078480, 247.07035354,
233.050771232, 234.050159841, 235.051434, 236.051413, 237.052901,
238.053028697, 239.054957, 240.055529539, 241.057653001, 242.058835824,
243.061389114, 244.062752578, 245.065491249, 246.067223662, 247.07035354,
248.072348508, 249.075953413, 250.078356959, 251.082284605, 252.084870,
247.07030708, 235.056580, 236.057330, 237.057003, 238.058281, 239.058279,
240.059759, 241.060230, 242.061981, 243.063007572, 244.065180774,
245.066361616, 246.068672947, 247.07030708, 248.073086, 249.074986657,
250.07831652, 251.080760172, 252.084310, 253.086880, 254.090600,
251.079586788, 237.062070, 238.061410, 239.062422, 240.062302, 241.063726,
242.063701552, 243.065427, 244.066000689, 245.068048612, 246.068805309,
247.071000589, 248.072184861, 249.074853537, 250.076406066, 251.079586788,
252.081625846, 253.085133145, 254.087322909, 255.091046, 256.093440,
252.082978512, 240.068920, 241.068538, 242.069745, 243.069548, 244.070883,
245.071324, 246.072896, 247.073656, 248.075471, 249.076411, 250.078612,
251.079992142, 252.082978512, 253.084824697, 254.088022021, 255.090273122,
256.093598, 257.095979, 258.099520, 257.095104724, 242.073430, 243.074353,
244.074084, 245.075385, 246.075299023, 247.076847, 248.077194714,
249.079034, 250.079521264, 251.081575017, 252.082466855, 253.085185236,
254.08685422, 255.089962202, 256.091773117, 257.095104724, 258.097076,
259.100595, 260.102678, 258.098431319, 245.080829, 246.081886, 247.081635,
248.082823, 249.083013, 250.084420, 251.084839, 252.086560, 253.087280,
254.089656, 255.091082705, 256.094059025, 257.095541368, 258.098431319,
259.100509, 260.103652, 261.105721, 262.108865, 255.093241131, 248.086596,
249.087833, 250.087510, 251.089012, 252.088976521, 253.090678,
254.090955253, 255.093241131, 256.094282666, 257.09687719, 258.098207,
259.101031, 260.102643, 261.105749, 262.107301, 263.110552, 264.112345,
260.105504, 251.094360, 252.095371, 253.095210, 254.096454, 255.096681,
256.098629, 257.099555, 258.101814, 259.102901, 260.105504, 261.106883,
262.109634, 263.111293, 264.114038, 265.115839, 266.119305, 263.112547,
253.100689, 254.100184, 255.101340, 256.101166194, 257.102990,
258.103489, 259.105637, 260.106440, 261.108766556, 262.109925, 263.112547,
264.113985, 265.116704, 266.117956, 267.121529, 268.123644, 255.107398,
255.107398, 256.108127, 257.107722, 258.109231, 259.109610, 260.111300,
261.112056, 262.114084, 263.114988, 264.117404, 265.118601, 266.121029,
267.122377, 268.125445, 269.127460, 270.130712, 259.114500, 258.113168,
259.114500, 260.114422071, 261.116117, 262.116398, 263.118322, 264.118931,
265.121114693, 266.122065, 267.124425, 268.125606, 269.128755, 270.130329,
271.133472, 272.135158, 273.138220, 262.122892, 260.121970, 261.121664,
262.122892, 263.123035, 264.124604, 265.125147, 266.126942, 267.127650,
268.129755, 269.130694, 270.133616, 271.135179, 272.138032, 273.139618,
274.142440, 275.144250, 263.128558, 263.128558, 264.128394885, 265.130085,
266.130097, 267.131789, 268.132162, 269.134056, 270.134650, 271.137657,
272.139052, 273.141986, 274.143131, 275.145952, 276.147208, 277.149841,
265.136151, 265.136151, 266.137299, 267.137307, 268.138728, 269.139055,
270.140657, 271.141139, 272.143738, 273.144913, 274.147492, 275.148647,
276.151156, 277.152420, 278.154812, 279.156193, 281.162061, 267.144341,
268.143795, 269.145124, 270.144720, 271.146062, 272.146317, 273.148863,
274.149492, 275.152176, 276.153034, 277.155647, 278.156469, 279.158861,
280.159795, 281.162061, 272.153615, 272.153615, 273.153682, 274.155713,
275.156142, 276.158493, 277.159519, 278.161604, 279.162468, 280.164473,
281.165372, 282.167486, 283.168415, 283.171792, 277.163943, 278.164312,
279.166546, 280.167039, 281.169286, 282.169765, 283.171792, 284.172384,
285.174105, 283.176451, 283.176451, 284.178080, 285.178732, 286.180481,
287.181045, 285.183698, 285.183698, 286.183855, 287.185599, 288.185689,
289.187279, 287.191186, 287.191186, 288.192492, 289.192715, 290.194141,
291.194384, 292.199786, 289.198862, 290.198590, 291.200011, 292.199786,
291.206564, 291.206564, 292.207549, 293.214670, 293.214670]
el2mass = dict(zip(_temp_symbol, _temp_mass))
el2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100.
eliso2mass = dict(zip(_temp_iso_symbol, _temp_iso_mass)) # encompasses el2mass
eliso2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100. # encompasses el2mass
#eliso2mass["X0"] = 0. # probably needed, just checking
el2z = dict(zip(_temp_symbol, _temp_z))
el2z["GH"] = 0
z2mass = dict(zip(_temp_z, _temp_mass))
z2el = dict(zip(_temp_z, _temp_symbol))
z2element = dict(zip(_temp_z, _temp_element))
el2element = dict(zip(_temp_symbol, _temp_element))
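# Illustrative usage sketch (editor's addition, not part of the original
# module). Assuming the symbol/isotope key formats used in the tables above
# (e.g. "U" for an element, "U235" for an isotope), the maps cross-reference
# cleanly:
#
#     el2z["U"]            # 92
#     z2el[92]             # "U"
#     el2element["U"]      # "Uranium"
#     eliso2mass["U235"]   # mass of the uranium-235 isotope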
|
loriab/qcdb
|
qcdb/periodictable.py
|
Python
|
lgpl-3.0
| 78,250
|
[
"CFOUR",
"Psi4"
] |
b704bacb4d34099d5318dbdb8d6f5a0aa1e35578d38e6a9d1d406bd0d95d5f37
|
#!/usr/bin/env python
from setuptools import setup
__version__ = '0.1'
setup(
name = 'kraller',
version = __version__,
description = 'A little application to allow signups with keys for accounts on a server',
author = 'Brian Stack',
author_email = 'bis12@case.edu',
url = 'http://github.com/hacsoc/kraller',
long_description = open("README").read(),
install_requires = [
'flask',
'flask-wtf',
'itsdangerous',
'requests',
],
license = 'BSD',
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
]
)
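# Editor's note (assumption, not stated in the repo): a setup script like this
# is normally consumed via `pip install .` for a local install, or
# `python setup.py sdist` to build a source distribution.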
|
hacsoc/kraller
|
setup.py
|
Python
|
bsd-2-clause
| 728
|
[
"Brian"
] |
aeb47b37984c12589c03878942f13eeb08a7de6d0039d91a5c48d912dc934170
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import warnings
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.pubsub_v1.services.subscriber import SubscriberAsyncClient
from google.pubsub_v1.services.subscriber import SubscriberClient
from google.pubsub_v1.services.subscriber import pagers
from google.pubsub_v1.services.subscriber import transports
from google.pubsub_v1.types import pubsub
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SubscriberClient._get_default_mtls_endpoint(None) is None
assert (
SubscriberClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
SubscriberClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SubscriberClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SubscriberClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert SubscriberClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
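# Editor's sketch (not the library's actual implementation) of the transform
# the test above pins down: splice ".mtls" after the service name of a
# *.googleapis.com endpoint, and leave already-mtls or non-Google endpoints
# untouched.
def _sketch_mtls_endpoint(endpoint):
    if not endpoint or ".mtls." in endpoint or not endpoint.endswith(".googleapis.com"):
        return endpoint
    name, rest = endpoint.split(".", 1)
    return "{}.mtls.{}".format(name, rest)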
@pytest.mark.parametrize("client_class", [SubscriberClient, SubscriberAsyncClient,])
def test_subscriber_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "pubsub.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.SubscriberGrpcTransport, "grpc"),
(transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_subscriber_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [SubscriberClient, SubscriberAsyncClient,])
def test_subscriber_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "pubsub.googleapis.com:443"
def test_subscriber_client_get_transport_class():
transport = SubscriberClient.get_transport_class()
available_transports = [
transports.SubscriberGrpcTransport,
]
assert transport in available_transports
transport = SubscriberClient.get_transport_class("grpc")
assert transport == transports.SubscriberGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc"),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
SubscriberClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberClient)
)
@mock.patch.object(
SubscriberAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SubscriberAsyncClient),
)
def test_subscriber_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SubscriberClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SubscriberClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", "true"),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", "false"),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
SubscriberClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberClient)
)
@mock.patch.object(
SubscriberAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SubscriberAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_subscriber_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [SubscriberClient, SubscriberAsyncClient])
@mock.patch.object(
SubscriberClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberClient)
)
@mock.patch.object(
SubscriberAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SubscriberAsyncClient),
)
def test_subscriber_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
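# Editor's summary of the assertions above:
#   GOOGLE_API_USE_MTLS_ENDPOINT == "never"  -> DEFAULT_ENDPOINT, no cert source
#   GOOGLE_API_USE_MTLS_ENDPOINT == "always" -> DEFAULT_MTLS_ENDPOINT, no cert source
#   "auto" (the default): DEFAULT_MTLS_ENDPOINT plus the cert source only when
#       GOOGLE_API_USE_CLIENT_CERTIFICATE == "true" and a cert is available;
#       otherwise DEFAULT_ENDPOINT with no cert source.
# An api_endpoint passed in client_options always takes precedence.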
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc"),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_subscriber_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", grpc_helpers),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_subscriber_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_subscriber_client_client_options_from_dict():
with mock.patch(
"google.pubsub_v1.services.subscriber.transports.SubscriberGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = SubscriberClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", grpc_helpers),
(
SubscriberAsyncClient,
transports.SubscriberGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_subscriber_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"pubsub.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
),
scopes=None,
default_host="pubsub.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
("grpc.keepalive_time_ms", 30000),
],
)
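# Editor's note on the asserted channel options: -1 removes gRPC's default
# send/receive message size caps, and grpc.keepalive_time_ms=30000 sends a
# keepalive ping every 30 s -- standard gRPC option semantics, useful for
# long-lived Pub/Sub streams (interpretation, not repo documentation).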
@pytest.mark.parametrize("request_type", [pubsub.Subscription, dict,])
def test_create_subscription(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
response = client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
def test_create_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
client.create_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
@pytest.mark.asyncio
async def test_create_subscription_async(
transport: str = "grpc_asyncio", request_type=pubsub.Subscription
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
)
response = await client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
@pytest.mark.asyncio
async def test_create_subscription_async_from_dict():
await test_create_subscription_async(request_type=dict)
def test_create_subscription_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.Subscription()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
call.return_value = pubsub.Subscription()
client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_subscription_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.Subscription()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_create_subscription_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_subscription(
name="name_value",
topic="topic_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].topic
mock_val = "topic_value"
assert arg == mock_val
arg = args[0].push_config
mock_val = pubsub.PushConfig(push_endpoint="push_endpoint_value")
assert arg == mock_val
arg = args[0].ack_deadline_seconds
mock_val = 2066
assert arg == mock_val
def test_create_subscription_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_subscription(
pubsub.Subscription(),
name="name_value",
topic="topic_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
ack_deadline_seconds=2066,
)
@pytest.mark.asyncio
async def test_create_subscription_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_subscription(
name="name_value",
topic="topic_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].topic
mock_val = "topic_value"
assert arg == mock_val
arg = args[0].push_config
mock_val = pubsub.PushConfig(push_endpoint="push_endpoint_value")
assert arg == mock_val
arg = args[0].ack_deadline_seconds
mock_val = 2066
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_subscription_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_subscription(
pubsub.Subscription(),
name="name_value",
topic="topic_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
ack_deadline_seconds=2066,
)
@pytest.mark.parametrize("request_type", [pubsub.GetSubscriptionRequest, dict,])
def test_get_subscription(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
response = client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
def test_get_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
client.get_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
@pytest.mark.asyncio
async def test_get_subscription_async(
transport: str = "grpc_asyncio", request_type=pubsub.GetSubscriptionRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
)
response = await client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
@pytest.mark.asyncio
async def test_get_subscription_async_from_dict():
await test_get_subscription_async(request_type=dict)
def test_get_subscription_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSubscriptionRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
call.return_value = pubsub.Subscription()
client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_get_subscription_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSubscriptionRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_get_subscription_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_subscription(subscription="subscription_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
def test_get_subscription_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_subscription(
pubsub.GetSubscriptionRequest(), subscription="subscription_value",
)
@pytest.mark.asyncio
async def test_get_subscription_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_subscription), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_subscription(subscription="subscription_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_subscription_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_subscription(
pubsub.GetSubscriptionRequest(), subscription="subscription_value",
)
@pytest.mark.parametrize("request_type", [pubsub.UpdateSubscriptionRequest, dict,])
def test_update_subscription(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
response = client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
def test_update_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription), "__call__"
) as call:
client.update_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
@pytest.mark.asyncio
async def test_update_subscription_async(
transport: str = "grpc_asyncio", request_type=pubsub.UpdateSubscriptionRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Subscription(
name="name_value",
topic="topic_value",
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter="filter_value",
detached=True,
enable_exactly_once_delivery=True,
)
)
response = await client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == "name_value"
assert response.topic == "topic_value"
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == "filter_value"
assert response.detached is True
assert response.enable_exactly_once_delivery is True
@pytest.mark.asyncio
async def test_update_subscription_async_from_dict():
await test_update_subscription_async(request_type=dict)
def test_update_subscription_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSubscriptionRequest()
request.subscription.name = "subscription.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription), "__call__"
) as call:
call.return_value = pubsub.Subscription()
client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"subscription.name=subscription.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_subscription_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSubscriptionRequest()
request.subscription.name = "subscription.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"subscription.name=subscription.name/value",
) in kw["metadata"]
@pytest.mark.parametrize("request_type", [pubsub.ListSubscriptionsRequest, dict,])
def test_list_subscriptions(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSubscriptionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSubscriptionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_subscriptions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
client.list_subscriptions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
@pytest.mark.asyncio
async def test_list_subscriptions_async(
transport: str = "grpc_asyncio", request_type=pubsub.ListSubscriptionsRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.ListSubscriptionsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSubscriptionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_subscriptions_async_from_dict():
await test_list_subscriptions_async(request_type=dict)
def test_list_subscriptions_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSubscriptionsRequest()
request.project = "project/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
call.return_value = pubsub.ListSubscriptionsResponse()
client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "project=project/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_subscriptions_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSubscriptionsRequest()
request.project = "project/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.ListSubscriptionsResponse()
)
await client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "project=project/value",) in kw["metadata"]
def test_list_subscriptions_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSubscriptionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_subscriptions(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].project
mock_val = "project_value"
assert arg == mock_val
def test_list_subscriptions_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_subscriptions(
pubsub.ListSubscriptionsRequest(), project="project_value",
)
@pytest.mark.asyncio
async def test_list_subscriptions_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.ListSubscriptionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_subscriptions(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].project
mock_val = "project_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_subscriptions_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_subscriptions(
pubsub.ListSubscriptionsRequest(), project="project_value",
)
def test_list_subscriptions_pager(transport_name: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token="abc",
),
pubsub.ListSubscriptionsResponse(subscriptions=[], next_page_token="def",),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(),], next_page_token="ghi",
),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(), pubsub.Subscription(),],
),
RuntimeError,
)
metadata = (
gapic_v1.routing_header.to_grpc_metadata((("project", ""),)),
)
pager = client.list_subscriptions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, pubsub.Subscription) for i in results)
def test_list_subscriptions_pages(transport_name: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token="abc",
),
pubsub.ListSubscriptionsResponse(subscriptions=[], next_page_token="def",),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(),], next_page_token="ghi",
),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(), pubsub.Subscription(),],
),
RuntimeError,
)
pages = list(client.list_subscriptions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
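# Editor's note: outside the mocks, the pager is consumed exactly as above --
# iterate it for flattened Subscription items, or walk .pages for the raw
# responses. A hedged sketch with a hypothetical project path:
#
#     for subscription in client.list_subscriptions(project="projects/my-project"):
#         print(subscription.name)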
@pytest.mark.asyncio
async def test_list_subscriptions_async_pager():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token="abc",
),
pubsub.ListSubscriptionsResponse(subscriptions=[], next_page_token="def",),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(),], next_page_token="ghi",
),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(), pubsub.Subscription(),],
),
RuntimeError,
)
async_pager = await client.list_subscriptions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, pubsub.Subscription) for i in responses)
@pytest.mark.asyncio
async def test_list_subscriptions_async_pages():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token="abc",
),
pubsub.ListSubscriptionsResponse(subscriptions=[], next_page_token="def",),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(),], next_page_token="ghi",
),
pubsub.ListSubscriptionsResponse(
subscriptions=[pubsub.Subscription(), pubsub.Subscription(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_subscriptions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
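# NOTE: The async variants only change the call shape: the client coroutine is
# awaited first to obtain the async pager, which is then drained with
# `async for` (flattened items) or through its .pages property (raw responses).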
@pytest.mark.parametrize("request_type", [pubsub.DeleteSubscriptionRequest, dict,])
def test_delete_subscription(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
client.delete_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
@pytest.mark.asyncio
async def test_delete_subscription_async(
transport: str = "grpc_asyncio", request_type=pubsub.DeleteSubscriptionRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_subscription_async_from_dict():
await test_delete_subscription_async(request_type=dict)
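# NOTE: Parametrizing over dict as well as the request class exercises request
# coercion: a plain dict is accepted in place of a
# pubsub.DeleteSubscriptionRequest and converted before reaching the transport,
# which is why args[0] still compares equal to an empty request object.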
def test_delete_subscription_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSubscriptionRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
call.return_value = None
client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
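# NOTE: "x-goog-request-params" is the standard GAPIC routing header: the
# client copies the request's `subscription` field into gRPC metadata so the
# service can route the call without inspecting the request body.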
@pytest.mark.asyncio
async def test_delete_subscription_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSubscriptionRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_delete_subscription_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_subscription(subscription="subscription_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
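# NOTE: For flattened calls the client assembles the request object itself from
# the keyword arguments, so the test inspects individual fields on the captured
# request (args[0]) rather than comparing whole request objects.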
def test_delete_subscription_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_subscription(
pubsub.DeleteSubscriptionRequest(), subscription="subscription_value",
)
@pytest.mark.asyncio
async def test_delete_subscription_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_subscription(subscription="subscription_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_subscription_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_subscription(
pubsub.DeleteSubscriptionRequest(), subscription="subscription_value",
)
@pytest.mark.parametrize("request_type", [pubsub.ModifyAckDeadlineRequest, dict,])
def test_modify_ack_deadline(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_modify_ack_deadline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
client.modify_ack_deadline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
@pytest.mark.asyncio
async def test_modify_ack_deadline_async(
transport: str = "grpc_asyncio", request_type=pubsub.ModifyAckDeadlineRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_modify_ack_deadline_async_from_dict():
await test_modify_ack_deadline_async(request_type=dict)
def test_modify_ack_deadline_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyAckDeadlineRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
call.return_value = None
client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_modify_ack_deadline_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyAckDeadlineRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_modify_ack_deadline_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.modify_ack_deadline(
subscription="subscription_value",
ack_ids=["ack_ids_value"],
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].ack_ids
mock_val = ["ack_ids_value"]
assert arg == mock_val
arg = args[0].ack_deadline_seconds
mock_val = 2066
assert arg == mock_val
def test_modify_ack_deadline_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.modify_ack_deadline(
pubsub.ModifyAckDeadlineRequest(),
subscription="subscription_value",
ack_ids=["ack_ids_value"],
ack_deadline_seconds=2066,
)
@pytest.mark.asyncio
async def test_modify_ack_deadline_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.modify_ack_deadline(
subscription="subscription_value",
ack_ids=["ack_ids_value"],
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].ack_ids
mock_val = ["ack_ids_value"]
assert arg == mock_val
arg = args[0].ack_deadline_seconds
mock_val = 2066
assert arg == mock_val
@pytest.mark.asyncio
async def test_modify_ack_deadline_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.modify_ack_deadline(
pubsub.ModifyAckDeadlineRequest(),
subscription="subscription_value",
ack_ids=["ack_ids_value"],
ack_deadline_seconds=2066,
)
@pytest.mark.parametrize("request_type", [pubsub.AcknowledgeRequest, dict,])
def test_acknowledge(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_acknowledge_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
client.acknowledge()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
@pytest.mark.asyncio
async def test_acknowledge_async(
transport: str = "grpc_asyncio", request_type=pubsub.AcknowledgeRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_acknowledge_async_from_dict():
await test_acknowledge_async(request_type=dict)
def test_acknowledge_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.AcknowledgeRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
call.return_value = None
client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_acknowledge_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.AcknowledgeRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_acknowledge_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.acknowledge(
subscription="subscription_value", ack_ids=["ack_ids_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].ack_ids
mock_val = ["ack_ids_value"]
assert arg == mock_val
def test_acknowledge_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.acknowledge(
pubsub.AcknowledgeRequest(),
subscription="subscription_value",
ack_ids=["ack_ids_value"],
)
@pytest.mark.asyncio
async def test_acknowledge_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.acknowledge(
subscription="subscription_value", ack_ids=["ack_ids_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].ack_ids
mock_val = ["ack_ids_value"]
assert arg == mock_val
@pytest.mark.asyncio
async def test_acknowledge_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.acknowledge(
pubsub.AcknowledgeRequest(),
subscription="subscription_value",
ack_ids=["ack_ids_value"],
)
@pytest.mark.parametrize("request_type", [pubsub.PullRequest, dict,])
def test_pull(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.PullResponse()
response = client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.PullResponse)
def test_pull_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
client.pull()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
@pytest.mark.asyncio
async def test_pull_async(
transport: str = "grpc_asyncio", request_type=pubsub.PullRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse())
response = await client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.PullResponse)
@pytest.mark.asyncio
async def test_pull_async_from_dict():
await test_pull_async(request_type=dict)
def test_pull_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.PullRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
call.return_value = pubsub.PullResponse()
client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_pull_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.PullRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse())
await client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_pull_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.PullResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
client.pull(
subscription="subscription_value",
return_immediately=True,
max_messages=1277,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].return_immediately
mock_val = True
assert arg == mock_val
arg = args[0].max_messages
mock_val = 1277
assert arg == mock_val
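# NOTE: The warnings filter above silences the DeprecationWarning raised when
# return_immediately is set; the field is deprecated in the Pub/Sub API but
# must remain settable through the flattened signature.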
def test_pull_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pull(
pubsub.PullRequest(),
subscription="subscription_value",
return_immediately=True,
max_messages=1277,
)
@pytest.mark.asyncio
async def test_pull_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pull), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
await client.pull(
subscription="subscription_value",
return_immediately=True,
max_messages=1277,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].return_immediately
mock_val = True
assert arg == mock_val
arg = args[0].max_messages
mock_val = 1277
assert arg == mock_val
@pytest.mark.asyncio
async def test_pull_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pull(
pubsub.PullRequest(),
subscription="subscription_value",
return_immediately=True,
max_messages=1277,
)
@pytest.mark.parametrize("request_type", [pubsub.StreamingPullRequest, dict,])
def test_streaming_pull(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.streaming_pull), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = iter([pubsub.StreamingPullResponse()])
response = client.streaming_pull(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, pubsub.StreamingPullResponse)
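# NOTE: streaming_pull is a bidirectional streaming RPC: the client is handed
# an iterator of requests and the mocked stub returns an iterator of responses,
# so the assertion recovers the sent request by calling next() on args[0].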
@pytest.mark.asyncio
async def test_streaming_pull_async(
transport: str = "grpc_asyncio", request_type=pubsub.StreamingPullRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.streaming_pull), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[pubsub.StreamingPullResponse()]
)
response = await client.streaming_pull(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, pubsub.StreamingPullResponse)
@pytest.mark.asyncio
async def test_streaming_pull_async_from_dict():
await test_streaming_pull_async(request_type=dict)
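# A minimal usage sketch of the RPC mocked above (illustrative only; the
# leading underscore keeps pytest from collecting it). It assumes nothing
# beyond the public streaming_pull signature exercised by these tests; the
# resource name and deadline are hypothetical placeholder values.
def _example_streaming_pull_usage(client):
    # Open the stream with an initial request naming the subscription.
    initial = pubsub.StreamingPullRequest(
        subscription="projects/my-project/subscriptions/my-sub",
        stream_ack_deadline_seconds=60,
    )
    # Each response carries a batch of received_messages; in real usage their
    # ack_ids would be acknowledged on a follow-up StreamingPullRequest.
    for response in client.streaming_pull(iter([initial])):
        for received in response.received_messages:
            _ = (received.ack_id, received.message)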
@pytest.mark.parametrize("request_type", [pubsub.ModifyPushConfigRequest, dict,])
def test_modify_push_config(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_modify_push_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
client.modify_push_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
@pytest.mark.asyncio
async def test_modify_push_config_async(
transport: str = "grpc_asyncio", request_type=pubsub.ModifyPushConfigRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_modify_push_config_async_from_dict():
await test_modify_push_config_async(request_type=dict)
def test_modify_push_config_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyPushConfigRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
call.return_value = None
client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_modify_push_config_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyPushConfigRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_modify_push_config_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.modify_push_config(
subscription="subscription_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].push_config
mock_val = pubsub.PushConfig(push_endpoint="push_endpoint_value")
assert arg == mock_val
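# NOTE: push_config is a message-typed flattened field; proto-plus messages
# compare by value, so asserting equality against a freshly constructed
# PushConfig verifies the nested message survived the flattened call intact.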
def test_modify_push_config_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.modify_push_config(
pubsub.ModifyPushConfigRequest(),
subscription="subscription_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
)
@pytest.mark.asyncio
async def test_modify_push_config_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.modify_push_config(
subscription="subscription_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
arg = args[0].push_config
mock_val = pubsub.PushConfig(push_endpoint="push_endpoint_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_modify_push_config_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.modify_push_config(
pubsub.ModifyPushConfigRequest(),
subscription="subscription_value",
push_config=pubsub.PushConfig(push_endpoint="push_endpoint_value"),
)
@pytest.mark.parametrize("request_type", [pubsub.GetSnapshotRequest, dict,])
def test_get_snapshot(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(name="name_value", topic="topic_value",)
response = client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
def test_get_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
client.get_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
@pytest.mark.asyncio
async def test_get_snapshot_async(
transport: str = "grpc_asyncio", request_type=pubsub.GetSnapshotRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Snapshot(name="name_value", topic="topic_value",)
)
response = await client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
@pytest.mark.asyncio
async def test_get_snapshot_async_from_dict():
await test_get_snapshot_async(request_type=dict)
def test_get_snapshot_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSnapshotRequest()
request.snapshot = "snapshot/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
call.return_value = pubsub.Snapshot()
client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot=snapshot/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_snapshot_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSnapshotRequest()
request.snapshot = "snapshot/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot=snapshot/value",) in kw["metadata"]
def test_get_snapshot_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_snapshot(snapshot="snapshot_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].snapshot
mock_val = "snapshot_value"
assert arg == mock_val
def test_get_snapshot_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_snapshot(
pubsub.GetSnapshotRequest(), snapshot="snapshot_value",
)
@pytest.mark.asyncio
async def test_get_snapshot_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_snapshot(snapshot="snapshot_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].snapshot
mock_val = "snapshot_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_snapshot_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_snapshot(
pubsub.GetSnapshotRequest(), snapshot="snapshot_value",
)
@pytest.mark.parametrize("request_type", [pubsub.ListSnapshotsRequest, dict,])
def test_list_snapshots(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSnapshotsResponse(
next_page_token="next_page_token_value",
)
response = client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSnapshotsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_snapshots_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
client.list_snapshots()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
@pytest.mark.asyncio
async def test_list_snapshots_async(
transport: str = "grpc_asyncio", request_type=pubsub.ListSnapshotsRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.ListSnapshotsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSnapshotsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_snapshots_async_from_dict():
await test_list_snapshots_async(request_type=dict)
def test_list_snapshots_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSnapshotsRequest()
request.project = "project/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
call.return_value = pubsub.ListSnapshotsResponse()
client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "project=project/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_snapshots_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSnapshotsRequest()
request.project = "project/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.ListSnapshotsResponse()
)
await client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "project=project/value",) in kw["metadata"]
def test_list_snapshots_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSnapshotsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_snapshots(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].project
mock_val = "project_value"
assert arg == mock_val
def test_list_snapshots_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_snapshots(
pubsub.ListSnapshotsRequest(), project="project_value",
)
@pytest.mark.asyncio
async def test_list_snapshots_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pubsub.ListSnapshotsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_snapshots(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].project
mock_val = "project_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_snapshots_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_snapshots(
pubsub.ListSnapshotsRequest(), project="project_value",
)
def test_list_snapshots_pager(transport_name: str = "grpc"):
client = SubscriberClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(), pubsub.Snapshot(),],
next_page_token="abc",
),
pubsub.ListSnapshotsResponse(snapshots=[], next_page_token="def",),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(),], next_page_token="ghi",
),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("project", ""),)),
        )
pager = client.list_snapshots(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, pubsub.Snapshot) for i in results)
def test_list_snapshots_pages(transport_name: str = "grpc"):
client = SubscriberClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(), pubsub.Snapshot(),],
next_page_token="abc",
),
pubsub.ListSnapshotsResponse(snapshots=[], next_page_token="def",),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(),], next_page_token="ghi",
),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(),],
),
RuntimeError,
)
pages = list(client.list_snapshots(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_snapshots_async_pager():
    client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(), pubsub.Snapshot(),],
next_page_token="abc",
),
pubsub.ListSnapshotsResponse(snapshots=[], next_page_token="def",),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(),], next_page_token="ghi",
),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(),],
),
RuntimeError,
)
async_pager = await client.list_snapshots(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, pubsub.Snapshot) for i in responses)
@pytest.mark.asyncio
async def test_list_snapshots_async_pages():
    client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(), pubsub.Snapshot(),],
next_page_token="abc",
),
pubsub.ListSnapshotsResponse(snapshots=[], next_page_token="def",),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(),], next_page_token="ghi",
),
pubsub.ListSnapshotsResponse(
snapshots=[pubsub.Snapshot(), pubsub.Snapshot(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_snapshots(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [pubsub.CreateSnapshotRequest, dict,])
def test_create_snapshot(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(name="name_value", topic="topic_value",)
response = client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
def test_create_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
client.create_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
@pytest.mark.asyncio
async def test_create_snapshot_async(
transport: str = "grpc_asyncio", request_type=pubsub.CreateSnapshotRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
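        # FakeUnaryUnaryCall wraps the response in an awaitable, mimicking the
        # call object a real grpc.aio unary-unary stub would return.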
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Snapshot(name="name_value", topic="topic_value",)
)
response = await client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
@pytest.mark.asyncio
async def test_create_snapshot_async_from_dict():
await test_create_snapshot_async(request_type=dict)
def test_create_snapshot_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.CreateSnapshotRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
call.return_value = pubsub.Snapshot()
client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_snapshot_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.CreateSnapshotRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_create_snapshot_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_snapshot(
name="name_value", subscription="subscription_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
def test_create_snapshot_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_snapshot(
pubsub.CreateSnapshotRequest(),
name="name_value",
subscription="subscription_value",
)
@pytest.mark.asyncio
async def test_create_snapshot_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_snapshot(
name="name_value", subscription="subscription_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].subscription
mock_val = "subscription_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_snapshot_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_snapshot(
pubsub.CreateSnapshotRequest(),
name="name_value",
subscription="subscription_value",
)
@pytest.mark.parametrize("request_type", [pubsub.UpdateSnapshotRequest, dict,])
def test_update_snapshot(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(name="name_value", topic="topic_value",)
response = client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
def test_update_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_snapshot), "__call__") as call:
client.update_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
@pytest.mark.asyncio
async def test_update_snapshot_async(
transport: str = "grpc_asyncio", request_type=pubsub.UpdateSnapshotRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
pubsub.Snapshot(name="name_value", topic="topic_value",)
)
response = await client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == "name_value"
assert response.topic == "topic_value"
@pytest.mark.asyncio
async def test_update_snapshot_async_from_dict():
await test_update_snapshot_async(request_type=dict)
def test_update_snapshot_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSnapshotRequest()
request.snapshot.name = "snapshot.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_snapshot), "__call__") as call:
call.return_value = pubsub.Snapshot()
client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot.name=snapshot.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_snapshot_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSnapshotRequest()
request.snapshot.name = "snapshot.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_snapshot), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot.name=snapshot.name/value",) in kw[
"metadata"
]
@pytest.mark.parametrize("request_type", [pubsub.DeleteSnapshotRequest, dict,])
def test_delete_snapshot(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
client.delete_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
@pytest.mark.asyncio
async def test_delete_snapshot_async(
transport: str = "grpc_asyncio", request_type=pubsub.DeleteSnapshotRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_snapshot_async_from_dict():
await test_delete_snapshot_async(request_type=dict)
def test_delete_snapshot_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSnapshotRequest()
request.snapshot = "snapshot/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
call.return_value = None
client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot=snapshot/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_snapshot_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSnapshotRequest()
request.snapshot = "snapshot/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "snapshot=snapshot/value",) in kw["metadata"]
def test_delete_snapshot_flattened():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_snapshot(snapshot="snapshot_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].snapshot
mock_val = "snapshot_value"
assert arg == mock_val
def test_delete_snapshot_flattened_error():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_snapshot(
pubsub.DeleteSnapshotRequest(), snapshot="snapshot_value",
)
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_snapshot(snapshot="snapshot_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].snapshot
mock_val = "snapshot_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_error_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_snapshot(
pubsub.DeleteSnapshotRequest(), snapshot="snapshot_value",
)
@pytest.mark.parametrize("request_type", [pubsub.SeekRequest, dict,])
def test_seek(request_type, transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.seek), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.SeekResponse()
response = client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.SeekResponse)
def test_seek_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.seek), "__call__") as call:
client.seek()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
@pytest.mark.asyncio
async def test_seek_async(
transport: str = "grpc_asyncio", request_type=pubsub.SeekRequest
):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.seek), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.SeekResponse())
response = await client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.SeekResponse)
@pytest.mark.asyncio
async def test_seek_async_from_dict():
await test_seek_async(request_type=dict)
def test_seek_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.SeekRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.seek), "__call__") as call:
call.return_value = pubsub.SeekResponse()
client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_seek_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.SeekRequest()
request.subscription = "subscription/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.seek), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.SeekResponse())
await client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "subscription=subscription/value",) in kw[
"metadata"
]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SubscriberClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SubscriberClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SubscriberClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SubscriberGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.SubscriberGrpcTransport,)
def test_subscriber_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SubscriberTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_subscriber_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.pubsub_v1.services.subscriber.transports.SubscriberTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SubscriberTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_subscription",
"get_subscription",
"update_subscription",
"list_subscriptions",
"delete_subscription",
"modify_ack_deadline",
"acknowledge",
"pull",
"streaming_pull",
"modify_push_config",
"get_snapshot",
"list_snapshots",
"create_snapshot",
"update_snapshot",
"delete_snapshot",
"seek",
"set_iam_policy",
"get_iam_policy",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_subscriber_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.pubsub_v1.services.subscriber.transports.SubscriberTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SubscriberTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
),
quota_project_id="octopus",
)
def test_subscriber_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.pubsub_v1.services.subscriber.transports.SubscriberTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SubscriberTransport()
adc.assert_called_once()
def test_subscriber_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SubscriberClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport,],
)
def test_subscriber_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SubscriberGrpcTransport, grpc_helpers),
(transports.SubscriberGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_subscriber_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"pubsub.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
),
scopes=["1", "2"],
default_host="pubsub.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
("grpc.keepalive_time_ms", 30000),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport],
)
def test_subscriber_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
("grpc.keepalive_time_ms", 30000),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_subscriber_host_no_port():
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="pubsub.googleapis.com"
),
)
assert client.transport._host == "pubsub.googleapis.com:443"
def test_subscriber_host_with_port():
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="pubsub.googleapis.com:8000"
),
)
assert client.transport._host == "pubsub.googleapis.com:8000"
def test_subscriber_grpc_transport_channel():
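    # local_channel_credentials() is only valid for localhost connections, so
    # the test can build a secure channel without real TLS material.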
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SubscriberGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_subscriber_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SubscriberGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport],
)
def test_subscriber_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
("grpc.keepalive_time_ms", 30000),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport],
)
def test_subscriber_transport_channel_mtls_with_adc(transport_class):
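    # Fake the SSL credentials that would normally be derived from ADC so the
    # test needs no real mTLS certificate material.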
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
("grpc.keepalive_time_ms", 30000),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_snapshot_path():
project = "squid"
snapshot = "clam"
expected = "projects/{project}/snapshots/{snapshot}".format(
project=project, snapshot=snapshot,
)
actual = SubscriberClient.snapshot_path(project, snapshot)
assert expected == actual
def test_parse_snapshot_path():
expected = {
"project": "whelk",
"snapshot": "octopus",
}
path = SubscriberClient.snapshot_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_snapshot_path(path)
assert expected == actual
def test_subscription_path():
project = "oyster"
subscription = "nudibranch"
expected = "projects/{project}/subscriptions/{subscription}".format(
project=project, subscription=subscription,
)
actual = SubscriberClient.subscription_path(project, subscription)
assert expected == actual
def test_parse_subscription_path():
expected = {
"project": "cuttlefish",
"subscription": "mussel",
}
path = SubscriberClient.subscription_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_subscription_path(path)
assert expected == actual
def test_topic_path():
project = "winkle"
topic = "nautilus"
expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic,)
actual = SubscriberClient.topic_path(project, topic)
assert expected == actual
def test_parse_topic_path():
expected = {
"project": "scallop",
"topic": "abalone",
}
path = SubscriberClient.topic_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_topic_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SubscriberClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = SubscriberClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = SubscriberClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = SubscriberClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = SubscriberClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = SubscriberClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = SubscriberClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = SubscriberClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SubscriberClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = SubscriberClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SubscriberTransport, "_prep_wrapped_messages"
) as prep:
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SubscriberTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SubscriberClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_set_iam_policy(transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.SetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.SetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_set_iam_policy_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy_pb2.Policy(version=774),
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_set_iam_policy_from_dict_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
response = await client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy_pb2.Policy(version=774),
}
)
call.assert_called()
def test_get_iam_policy(transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.GetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.GetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_get_iam_policy_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.get_iam_policy(
request={
"resource": "resource_value",
"options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_get_iam_policy_from_dict_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
response = await client.get_iam_policy(
request={
"resource": "resource_value",
"options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
def test_test_iam_permissions(transport: str = "grpc"):
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.TestIamPermissionsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
response = client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy_pb2.TestIamPermissionsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
)
response = await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_field_headers():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse()
)
await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict():
client = SubscriberClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
response = client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_test_iam_permissions_from_dict_async():
client = SubscriberAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse()
)
response = await client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_transport_close_async():
client = SubscriberAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
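    # Map each transport name to the private attribute holding its channel so
    # the test can patch that channel's close() method.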
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = SubscriberClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(SubscriberClient, transports.SubscriberGrpcTransport),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
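        # create=True lets the patch succeed even if the installed google-auth
        # version does not define get_api_key_credentials.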
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-pubsub | tests/unit/gapic/pubsub_v1/test_subscriber.py | Python | apache-2.0 | 195,501 | ["Octopus"] | d761556ddcfb740042fc51cecb3b2a6fc23dc36d5585444283c4375e4f511d03 |
# Ranked lists of popular name prefixes, keyed by prefix length; each list
# runs from most to least common. PREFIX_SCORES maps every prefix to a score
# that decays linearly with its rank in its list.
POPULAR_PREFIXES = {}
PREFIX_SCORES = {}
POPULAR_PREFIXES[2] = """
ma
ca
se
co
ba
sa
lo
pa
re
th
ho
ha
mo
sh
bi
be
te
la
ne
fi
me
ch
in
al
li
bo
go
po
to
st
so
ta
fa
mi
da
tr
do
le
vi
bu
my
no
su
di
si
ra
wa
an
ar
ga
he
ro
fo
jo
wi
pr
pe
de
pi
gr
ge
ki
as
na
ka
cl
hi
fl
ti
dr
sp
fu
ad
yo
on
bl
wo
ke
us
ea
pl
wh
am
ri
fr
br
pu
ja
ai
gu
cr
ac
sc
ni
ju
mu
en
el
ce
tu
va
gi
ko
ru
ci
at
hu
ab
ve
cu
is
we
lu
fe
ec
du
sm
un
ww
or
ou
im
id
vo
ap
au
ed
je
qu
ev
ic
sk
ya
em
e-
ji
ag
gl
es
ex
sl
cn
ye
er
ze
op
tw
ku
za
ol
av
up
nu
by
cs
51
sy
sz
il
sw
of
ir
xi
ip
mr
af
yu
zi
zo
10
bj
dj
pc
sn
eu
eb
ey
12
cc
52
cy
oh
ph
hy
zh
et
aa
ok
kr
ur
kn
om
it
mm
sd
mc
ib
i-
od
ak
tv
ps
bb
if
ah
os
ep
ms
ss
ob
cp
yi
11
ez
ty
eg
az
mp
ow
ia
oi
nx
cd
36
js
ly
qq
ot
ov
ny
oc
xx
17
gz
gp
kl
3d
ae
05
aw
dy
88
mt
99
hn
ts
hd
20
ay
gs
dn
ds
dc
ig
hb
sf
dv
hk
uk
tt
qi
ul
ut
02
16
18
ef
hs
tc
fs
pp
xa
oz
cm
zj
ek
gd
zu
um
00
ky
zz
nc
a-
ct
nb
zg
uc
nj
ks
gh
91
ub
13
dl
ug
tx
sb
rs
24
kh
07
aq
dd
sj
gm
ry
xe
ml
hr
mb
77
jp
jc
ik
jm
wr
ei
md
dm
wu
ao
iv
xy
io
gy
aj
bs
ht
db
66
bc
cq
sr
cb
bd
eq
xu
bm
fx
21
gc
tm
xt
bt
ee
80
dg
ie
pd
hz
jx
tj
m-
sq
cf
ns
ax
xm
pt
jj
iw
gt
rc
cz
hh
dh
jl
fc
oo
55
ii
zy
jd
nt
t-
gg
nh
cg
ls
xs
01
sx
ih
py
lc
wm
22
hc
xp
rh
tl
ff
g-
vu
jn
oa
iz
bh
c-
ft
ox
ll
pm
s-
km
sg
ws
xo
ew
yy
eh
ys
df
xj
b-
og
tn
ua
x-
3g
yn
hl
dt
wy
qa
kw
33
fj
50
58
yt
rt
mn
19
kk
jb
bn
d-
92
sv
nw
qd
jr
mg
15
rm
mk
fm
86
kc
ng
56
30
vs
rp
jy
oy
mh
lt
nm
kt
jh
zs
yc
xc
bg
hm
wc
uu
fb
lv
wz
nd
vn
yl
hp
gn
xl
hf
23
wx
bp
mj
ym
wb
yx
pk
dz
nn
gb
jk
ud
lg
vr
gw
fy
k-
tb
xz
gx
td
fz
gf
nf
59
1s
uf
wp
08
tz
jt
31
dw
wt
cj
mv
rd
dp
tg
90
yz
kb
j-
xb
jz
nv
rx
eo
cv
tf
pb
tk
vl
mf
u-
vc
lp
rr
zq
98
lm
n-
32
25
lh
03
97
f-
kd
bf
hw
nr
bk
nl
rf
wd
lb
hx
wf
cw
cx
np
a1
ln
oe
vv
fp
wl
xf
dk
tp
xh
xd
ej
ld
rb
35
dx
h-
zb
qo
jw
yh
rv
pv
vt
kp
pg
5i
zx
kf
09
lz
yb
mx
lf
fh
p-
ck
iq
vb
oj
40
yw
qs
bw
vp
yd
pj
pn
57
vm
89
14
78
ql
ue
xr
hg
mw
r-
qz
jf
mz
68
uv
hj
wg
ui
04
fd
ix
zd
lx
xg
wj
bz
vg
zm
94
yg
45
lj
pf
uo
v-
rj
o-
gk
yp
44
gj
kg
39
81
kj
kv
b2
h2
zl
zt
28
1-
rg
qw
iy
yr
fn
qr
vd
pw
iu
37
26
82
60
zw
qe
69
hq
zc
w-
uh
41
54
yj
53
qc
nk
qy
06
nz
vw
85
q-
qt
ij
uw
96
uz
38
rl
fw
61
67
rn
yf
70
qh
wn
vf
qv
3a
fg
uy
lw
jv
rw
rz
l-
l2
xn
px
3s
yk
jg
qb
3r
q8
vy
hv
4s
gv
2s
xw
wv
65
zn
lk
83
c2
95
71
42
87
dq
vk
34
72
a2
wk
rk
zr
f1
2b
75
4t
zf
w3
vh
yq
73
bx
xq
qp
qm
zp
z-
29
1m
y-
bv
4m
93
1c
gq
vq
76
vz
kz
5a
4d
d2
lr
kx
jq
vj
zv
m2
oq
ux
qf
63
tq
fk
1a
d3
5d
m1
lq
2-
84
xv
uj
3-
e3
pz
zk
27
4u
4f
xk
43
2n
qx
4g
yv
1t
qn
4p
9i
2m
k9
79
mq
2c
7c
62
p2
3p
e2
1p
1b
3c
vx
3w
qj
5s
74
4c
2d
1d
7a
2p
4-
c4
5t
s3
s2
7s
d1
pq
g1
4a
3b
uq
i2
4e
qg
p3
3x
fv
48
49
1w
a7
fq
2a
i3
nq
4b
3m
m3
2g
o2
46
rq
7l
7-
1f
g2
s1
2t
2l
1l
qk
u2
3t
k1
64
3l
3f
3e
9s
9t
wq
4x
4w
9a
kq
47
2h
7t
i0
5u
n2
c3
t1
8s
r2
k2
0-
b1
2f
7d
7o
bq
t2
e8
1g
1h
e0
1k
x1
5l
4y
4r
3k
3n
8m
6s
p1
b4
2e
1r
1o
7b
7h
5m
r1
v3
6d
l7
4k
t3
2w
7e
w1
4l
4h
g3
9l
8d
7m
m0
a3
s4
2i
1n
x2
m4
s7
n1
1y
1e
7k
v2
5y
5c
c1
3y
8g
r3
7r
i1
5k
5-
b3
a5
4n
g4
9c
9d
e1
2u
8i
8t
e7
w2
0d
5f
a8
4i
3q
3o
9-
2o
8l
8b
1u
1i
7f
7p
6t
5e
4j
3h
2y
2k
7i
7y
6a
6h
6r
5h
5b
3z
9m
9b
9p
9w
y2
8c
1j
7g
w0
6f
i5
u8
t7
e4
q1
j2
p4
i9
5p
5j
n0
l3
h1
3i
l1
9r
2r
e5
7x
k8
q2
t9
0p
0c
p0
b7
o1
u1
3v
9k
2x
e9
8p
d8
d4
d0
w8
0a
5w
a4
a6
4v
z1
m6
s0
s6
8k
8a
r4
d7
6k
c0
i4
i7
5x
a0
n3
g0
t6
9u
m7
8f
r0
r8
1v
1x
x3
j1
6l
6w
v1
5r
h3
t0
9n
9g
9y
f2
s8
2j
7n
6-
6m
h0
h5
g8
t4
9o
m8
f4
8w
x4
7v
k0
k7
k5
d9
j3
0s
0x
0b
0l
6b
6p
v8
c8
b0
h4
t5
z5
z6
m9
1q
7w
7z
k4
c9
0r
t8
3j
9e
9z
e6
f3
f8
2v
8-
k3
d5
5g
4o
g5
9f
m5
f0
y5
8j
8q
x6
x5
k6
0w
6n
6v
c6
p8
5z
5o
b5
3u
z9
9q
f5
f7
s5
y6
8h
u0
1z
7u
d6
q3
6c
6y
c5
v5
v9
l0
l4
n8
a9
z3
9x
y9
2q
8y
8r
r7
j0
j5
0u
6o
6q
z8
z7
9h
9j
s9
y3
8n
x7
n4
q7
0g
0m
6e
6g
v6
y1
u7
u3
w6
z2
n9
8x
8u
q6
j7
w5
w4
0k
c7
i8
v0
5q
b6
o6
u5
4z
o0
z0
f6
y8
2z
8o
u6
x0
q4
j9
0n
p5
i6
l8
h8
h9
h6
4q
z4
9v
y0
8e
8z
8v
7j
q0
w9
0q
0f
6i
p6
l9
n6
y7
x8
q5
j8
w7
6x
v7
o8
o3
h7
g7
g6
g9
f9
r6
j6
0v
0t
6u
p9
5n
x9
v4
b9
l6
n7
u4
r9
0z
0h
0o
6j
o9
n5
u9
7q
y4
0y
5v
b8
q9
0e
0j
0i
6z
o7
r5
l5
o4
j4
p7
o5
""".split()
# 1332 length-2 prefixes: scores run from 1332/3996 (= 1/3) down to 1/3996.
for index, name in enumerate(POPULAR_PREFIXES[2]):
    PREFIX_SCORES[name] = (1332 - index) / 3996.0
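# Illustrative sanity check (added here for exposition, not part of the
# original source): the top-ranked length-2 prefix holds the maximum score.
assert PREFIX_SCORES["ma"] == 1332 / 3996.0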
POPULAR_PREFIXES[3] = """
the
car
sho
chi
sex
you
new
pro
for
mar
sta
tra
net
pla
hot
cha
art
all
fre
par
man
lin
ban
www
fun
fin
min
tea
get
top
gam
com
buy
win
rea
eas
job
big
boo
dea
bar
sha
see
gre
sun
fil
cas
por
che
can
hea
tri
red
sto
han
lov
bes
tru
int
goo
inf
dat
eco
got
bet
kin
met
web
hel
blo
mon
one
med
air
clu
gra
lea
eve
fan
pri
wor
mai
mad
fir
shi
her
mod
she
cal
per
sal
war
thi
cat
sol
con
not
far
use
bea
sel
fla
bit
cor
uni
hom
kid
ask
foo
sit
wil
pop
dog
bus
blu
spo
liv
lan
mas
ind
bid
bra
des
gol
lif
jus
key
mus
col
san
pay
ste
dis
fly
fac
log
hos
ver
pet
dar
spa
ten
map
cam
nor
cit
sur
ide
mis
gro
eye
fit
har
our
law
sen
loo
and
pin
tim
tre
pen
mor
cel
out
pre
stu
aut
tur
cla
flo
dan
roo
son
tec
van
nam
fee
men
hit
mat
pic
mov
tax
tha
coo
pur
sof
cli
tal
nic
act
onl
wal
sam
sou
fas
qui
too
pol
loc
pos
vid
now
run
hal
sin
day
pho
let
tel
bla
bad
who
cod
bal
any
add
bil
sea
sup
ope
loa
saf
bas
bio
its
bab
mil
mob
vis
old
vie
hol
las
res
dre
don
fai
are
joy
box
wan
two
ser
cra
ali
pas
tin
kar
ice
spe
bri
lon
tan
los
hai
biz
dig
pai
yes
glo
mac
leg
ren
sma
cre
age
may
gen
sav
plu
ant
off
bod
wel
tex
sca
mag
wes
ins
str
arm
val
gir
luc
eur
sat
rad
how
lis
mix
die
pat
via
nex
fix
hat
gas
app
ala
set
mak
cos
boy
sim
max
del
ran
dra
ful
gun
fis
ben
wow
ima
mer
pac
sig
try
ter
say
lot
dia
cle
vip
led
lad
wat
bel
bac
why
usa
mys
low
asi
pan
tak
way
eat
cut
tes
nan
end
rai
cop
bug
ani
wha
bro
bed
pea
cho
tee
hou
kee
ski
mal
pag
mot
dee
dom
oil
poo
tas
boa
sum
pok
ang
hig
123
sky
was
ama
bon
rac
rat
gla
raw
kan
seo
dai
tou
doo
ale
odd
gay
fra
kno
cro
lat
ove
bul
own
aim
six
hun
100
hao
ara
meg
dow
geo
rob
pal
clo
pow
ana
ear
vot
roc
cen
tom
fat
dri
lil
moo
den
jun
sli
nin
mic
tie
les
ken
dro
cap
rus
mos
tot
xin
sti
tun
eli
nat
bur
woo
ris
sil
sch
som
sar
rin
mes
ton
guy
fro
cur
ele
bee
fou
dir
vit
rid
vin
inc
his
whi
spi
rap
alt
sai
sco
mee
gui
hor
nov
hap
tow
ric
neo
doc
bob
fli
soc
joi
cou
wis
mir
bai
wea
ent
rol
lab
ros
gar
dav
hop
tar
wee
dry
chu
roa
fot
exp
god
nee
has
sec
jin
ann
pus
sad
cin
vir
div
hon
kat
kil
abo
slo
tam
lar
ame
dec
bre
whe
ber
ada
ite
caf
edu
myc
din
qua
myt
tro
zen
ass
tor
inn
dot
twi
gri
non
bin
lam
edi
tao
kor
sid
bor
acc
jam
joh
len
bat
coc
pip
jum
fal
rec
pix
sms
hug
pha
ast
mel
tod
deb
arc
shu
mei
dem
hid
tab
xxx
pra
alb
swi
abc
pap
ira
lik
won
gru
fed
zer
dvd
sty
mym
hac
sos
wet
kon
cup
hil
dum
joe
myb
laz
rou
byt
mea
gal
rev
jan
rel
viv
ero
mik
giv
cyb
sub
rem
fri
sor
hua
voi
der
ven
alm
ace
aid
ene
bli
scr
mom
toy
smi
my-
eri
mp3
bol
aqu
bir
ill
lac
gif
edg
ita
put
fle
tek
lim
que
tap
jac
wid
ari
myp
wik
jim
swe
ate
soo
zoo
pul
kit
kim
al-
adu
kal
rep
jia
but
als
chr
gos
gai
ret
cri
bos
sev
inv
ema
hab
sna
did
gap
hoo
bot
lux
dol
hum
thr
adv
iam
wei
tat
lap
xia
fol
alp
ham
typ
gee
jok
nas
dru
alo
org
gan
gog
kai
ext
ref
nea
mya
hen
lee
ami
gon
mam
est
365
fiv
bik
hav
cab
lol
pub
reg
bud
dal
tit
pil
kur
kam
sno
sui
vic
myd
saw
abl
vas
zon
yog
awa
dor
ads
ava
lay
ora
sem
gua
rar
rom
ron
roy
fam
fox
mem
tai
wen
wit
lig
poe
ash
lit
nav
dam
mul
veg
asp
gig
bay
yan
pau
afr
amo
dou
myl
cry
wap
lun
myg
sug
cer
ilo
swa
myf
fer
bag
syn
zha
imp
equ
jen
phi
tok
ato
fry
ink
mid
emp
opt
aud
sla
coa
pun
sys
ray
sus
dub
fel
gul
amb
hip
tag
jos
var
mye
ont
jet
wif
gat
poi
jon
wer
nik
aer
ine
gor
avi
nig
tho
oba
lie
kra
epi
kis
bou
nos
wir
eng
360
pod
rig
kel
mia
nob
tub
fuc
urb
eba
diy
suc
dos
ela
agr
cad
dic
vil
lev
jay
asa
mun
bui
lef
gem
tem
ebo
sak
lou
nar
ram
thu
nev
spr
bei
fut
cai
lor
iso
wom
200
ade
fab
onc
ses
ico
myn
ree
sac
cru
mit
php
ger
ado
ult
rei
alf
vol
him
und
tia
520
myr
sas
cov
aus
fig
evi
ord
esc
row
myh
att
sab
aca
goa
isl
ell
ker
koo
wol
bru
plo
kas
sle
kol
fur
lau
rub
sir
ist
isa
hyp
ata
nec
tig
atl
lak
pon
ing
nut
jes
mol
aba
bis
pit
wai
noo
vel
muc
ipo
nom
pot
riv
exi
das
pie
soh
adm
ori
gin
e-s
ner
til
jav
amp
bun
lib
ape
sci
kay
ido
pee
mur
ans
sis
bao
kom
tol
def
ano
ugl
aga
dad
esp
had
leo
888
cub
ago
ase
buz
pim
myo
zip
doe
mah
enj
ede
oli
eko
mrs
kir
tut
cum
wed
yar
adi
myw
ane
smo
lai
emi
bec
aft
pis
imm
iri
ome
hin
tac
fon
jap
nai
myi
jas
reb
gur
nur
mec
mou
ica
siz
ble
eac
ish
maj
iph
rot
sag
ise
kri
lal
gel
gob
iro
zin
gop
abs
gom
go-
hir
fea
kha
emo
flu
go2
gia
tip
due
ska
beb
dep
pia
aaa
dun
nap
oto
gof
abi
ave
cyc
168
kos
tic
ath
lex
nak
oma
maz
tob
det
eva
lav
moj
num
jud
nol
zap
mmo
usb
rod
hay
sap
hem
hei
eth
cot
sic
jer
ple
bak
pad
mma
jul
ina
ego
css
buc
apo
cow
iss
pak
env
cir
nud
rul
zhu
pir
rag
hey
sce
arg
apa
aco
sek
lip
kaz
neu
abe
quo
ras
sud
hyd
bib
cn-
dut
few
114
kli
jew
eme
yoo
pup
yea
mig
arr
sau
exa
gaz
ped
mim
tos
999
lei
cac
spl
noc
amy
kab
nyc
ion
gil
1st
poc
gps
sot
exc
ams
sh-
aur
aro
fie
ino
spy
ida
wii
bbs
aci
itc
it-
jef
psy
isi
omn
fus
cul
cnc
hes
aka
lia
gle
bia
bum
exe
cav
noi
cep
ups
dil
pel
057
evo
err
sob
pig
yum
dyn
sop
sed
jaz
lum
haw
dur
joo
kro
lao
nit
yin
ech
muz
els
avo
vet
ons
iti
jar
in-
dod
dim
rak
mut
xtr
inp
coi
sod
e-m
yam
abb
soa
elo
igo
sia
fru
bom
beg
isp
ore
abu
gab
goc
jad
kak
ibe
bam
ats
ito
alu
lag
e-c
bog
ort
pam
oce
myk
atr
sei
ere
wav
wri
ust
800
uss
ebi
hob
lec
era
zan
luv
imo
goe
kun
squ
alg
inm
daw
gou
elm
ohi
nes
iwa
pc-
agi
lic
diz
ges
cus
piz
nok
hep
seg
kne
cct
slu
rav
dus
rit
txt
ire
sme
psp
lem
idc
oyu
leb
inb
eca
twe
pep
zhi
ess
goi
alk
sul
rio
ini
la-
cof
asc
uta
mav
dob
abr
pes
bux
mud
elc
tir
pou
fei
yun
heb
gov
nod
zar
idi
yao
kot
alc
acu
emu
gsm
meh
baz
yel
rya
hui
apr
ege
kap
adr
dip
bow
moc
ube
kre
wiz
yet
aja
pom
fem
fet
jor
saa
ach
sep
vod
eta
wag
yon
021
201
cog
foc
aar
kni
ibi
yah
eag
mau
hus
059
051
sym
hak
sah
mlm
obe
ces
yas
upl
kok
enc
sog
gli
soy
e-b
shy
aff
ese
bub
ars
ati
zam
cem
180
reh
ich
hub
sue
fif
djs
kla
usc
mba
pum
aso
666
nad
bah
tul
gad
alh
777
sib
nis
nag
shr
upp
dek
arb
noa
yen
ura
doi
tum
reo
img
ark
sie
vac
ves
scu
lep
ili
gue
ccc
amu
egy
dit
e-k
bey
ald
dns
emb
cau
gha
amc
ced
rum
sni
duc
iba
dna
tet
acr
cic
cob
boc
fue
zhe
ime
rab
hul
duo
us-
myv
no-
liz
beh
kev
unc
boi
dac
owe
meb
cis
lio
liu
e-p
kei
pik
eni
raz
rog
usp
tay
avt
cpa
itr
jak
aki
ted
ais
gho
nop
paw
moe
ske
uma
b2b
ceo
upo
247
gea
kob
ila
esh
gow
anu
hau
qin
ost
oni
ete
eto
awe
cns
rip
api
atm
mex
exo
cig
wec
163
zee
wak
asl
e-t
drs
orb
bic
kul
ena
imi
aha
anc
fau
fos
axi
yor
bef
101
isc
dop
esa
apt
sip
911
jea
vor
nof
lus
010
gym
dov
mca
raf
gis
tib
fad
sjz
haz
seb
xan
cim
uae
epo
wac
nog
yak
koc
ipa
emm
gum
cak
e-d
rui
nep
oka
kou
re-
mum
kum
ens
nha
toc
dua
tv-
kle
itp
von
cia
kad
rud
nac
mcc
dio
maf
tus
bmw
peo
usi
afa
ral
bem
url
tid
075
vib
xen
inh
epa
ibo
rug
upt
kik
kic
aya
gao
dah
elt
kyl
yuk
hur
fes
111
myj
jee
lel
asd
ded
cst
dab
tah
ond
ohm
ush
asm
liq
007
maa
fuz
deg
500
sko
fib
sow
azu
itm
jai
szs
rss
cec
nab
mog
usd
zho
lyr
afi
goh
kwi
tik
jue
apl
djm
bjh
hed
pda
ukr
jui
weg
asu
rut
drm
bau
bie
dag
jab
kua
tof
fak
rim
edo
jag
wep
laf
kem
yal
joa
ole
ona
civ
cms
usf
no1
517
nau
neg
dok
dew
rex
raj
smu
073
ved
dj-
itd
szh
phe
inl
cag
zac
wic
sne
oro
moh
kop
bim
rew
mie
321
tog
msn
fav
teh
i-s
ard
ebe
sss
bjs
ith
itf
iha
jal
noh
nou
lid
ubi
xue
ism
gec
pcs
300
rek
ksa
cnt
bys
hoc
fic
atc
333
guo
yoy
nem
mep
tud
gok
fax
jol
118
hoa
dja
viz
jel
una
kru
hrb
ein
kac
syr
bew
pyr
shm
hdt
adc
esi
ank
eda
nfl
hog
voc
ode
e-a
luk
bok
orc
gav
lyn
chn
dao
ad-
kus
elf
ian
efe
iva
bjj
sew
le-
ler
alw
nil
szj
gut
518
amt
oze
jie
ain
sds
dae
oct
gim
sut
dul
fid
psi
agu
eti
une
zeb
miz
cao
amr
ota
dr-
uns
orl
csc
fuj
usm
411
toa
wra
jur
aln
vat
053
qia
vox
wew
glu
mrc
eig
hi-
wig
wim
lub
drc
fsb
mao
tuc
pec
szy
toe
elp
cnh
anh
sik
ebu
joc
ola
aza
het
ohs
xun
zig
gus
zak
koi
dap
ims
eld
cng
git
kyo
sua
suk
hic
tad
sku
hec
vig
sz-
zel
lom
cet
shj
drb
opa
dei
mms
gag
daf
kup
rpg
myu
mls
phr
lob
wod
nei
omi
kod
fuk
elb
el-
ahm
gio
521
dud
gr8
110
ono
inw
inr
akt
noe
yos
kaf
ecl
bev
uno
hyb
peg
kut
rah
yua
yur
nba
hik
atp
ivy
hex
alr
aks
coz
coe
wad
ifa
stl
oxy
001
008
nen
nek
unt
opi
moz
gma
eso
mug
elk
jog
vam
dha
soi
osa
acm
itb
jed
voo
zim
zet
lok
egg
ifi
dib
e-l
shs
ket
kep
oth
doa
mok
csa
cun
buk
pio
pif
eno
yue
eho
aru
dui
atv
jil
onb
ems
51s
amf
uti
nal
kes
yac
nel
mef
isr
isb
doh
baj
kiw
mae
mop
csi
daz
mip
hud
suz
h2o
eze
itl
aly
ait
aic
nir
spu
zai
mrm
sri
adp
auc
pcp
adh
elv
ppc
ssa
iga
eff
vag
irc
lah
tep
wam
ccs
yok
kao
adt
000
028
lui
sbs
rez
ile
cnb
yul
fog
sok
kho
ipe
acs
zir
wef
ail
nix
wah
asb
asw
isu
mcs
kia
pav
pcm
toh
cnn
sql
anl
jua
roz
i-c
ibu
tau
oly
nxd
itw
itu
cib
irs
nim
aml
yaz
jiu
ott
yer
dif
gei
csp
usw
xbo
tsu
mih
cny
cna
uri
rif
azi
acn
vik
fzl
usl
yil
amg
ceb
kav
e-r
boz
be-
126
upf
zjj
dak
ilu
adl
oas
yap
sey
syl
mew
hah
hag
umi
dje
198
klo
phy
inu
555
ifr
kah
kaw
swo
yee
tnt
csm
ewe
esl
enf
gib
138
atu
anb
079
osh
oso
gzh
vio
itv
itt
vog
erg
eml
amm
ega
i-t
weh
rur
nao
ozo
bep
upa
kie
boh
iwe
paz
abd
twa
okc
iki
mua
cnj
tis
rok
dex
ict
msc
bds
byb
jou
vad
055
hok
178
pru
sae
zom
vim
cip
coh
nia
obi
eki
zab
mrt
mrb
lod
puc
naz
ece
shb
lul
ned
drd
24h
isk
zun
tup
de-
usr
rer
rau
giz
ure
shh
3d-
efi
riz
rop
edd
fen
sif
bym
dsl
axe
cil
591
wem
rej
aig
szl
yol
ife
ifo
asr
asf
e-n
027
lud
ote
meo
baa
pao
moi
deu
csh
oca
gta
alj
hib
aas
rib
byl
vai
dnf
atw
sov
vec
ipr
ips
djt
djc
vul
bha
vos
uca
bry
laa
amd
kaa
kau
e-g
e-h
ecc
beo
yay
104
mab
dej
oke
cui
rey
esk
buf
goj
ahe
aho
poz
izl
hiv
aps
siv
131
xie
aze
oss
ipt
ive
ez-
bjz
bjy
iho
szc
phu
mr-
cci
amw
lop
ifl
naa
syd
120
keb
yat
neb
drp
isf
tyr
omg
zum
ors
ike
cuc
lyg
tvb
pid
kub
ecu
cnp
cne
ici
huo
188
jom
bbb
atf
171
osi
onm
ony
scc
itg
jem
crm
cie
etc
alv
cok
xyz
51c
idr
ute
sve
mek
ruc
oks
uso
vla
haa
kuk
nul
cnl
roi
rox
jst
zzz
byp
bbc
woa
vee
ong
onu
zor
tva
oho
oha
voy
zio
nih
nip
obs
mtv
htm
zag
mrd
boe
nah
sns
drf
oft
dof
ged
bij
ufo
sdh
mi-
bue
ahs
gid
tif
eha
jug
jig
fay
suv
tav
byr
076
djb
onf
bjx
bj-
wax
pdf
lek
tlc
ofi
pli
fbc
cay
lof
hho
eee
snu
cyn
meu
wwe
uto
deh
gau
sdc
tox
cnw
cnm
dev
anm
ign
suo
i-m
hk-
taw
bbw
bbq
mez
ipi
hee
scs
jeu
315
cma
emr
nie
niu
515
cca
nmg
hlj
lur
luo
ked
aug
upc
omo
pcb
koh
ika
pei
adb
400
nuc
elu
anf
cbs
igr
cds
dup
139
atb
173
ose
ipl
djd
uk-
xma
ivi
ezs
ftp
dfw
kna
jaw
etr
we-
lax
fag
emc
ai-
miy
mts
gup
cea
ifs
naw
024
uru
shz
aun
dik
dix
nys
geb
tuf
tug
moa
900
dez
csl
csr
oza
dau
imc
toi
nus
nun
tvs
sht
ryu
hue
anp
ria
678
byd
byc
vap
gzs
onp
bjc
heh
aap
rfi
seh
odi
fx-
szt
co-
gna
epe
sza
yis
pug
e-v
uro
upd
ova
doy
kib
ulu
ma-
tui
imb
pct
pch
oku
oki
ewa
cue
afl
piv
oak
kuw
imu
icc
icu
msp
faz
efu
jod
fio
osc
sao
sax
ivo
hew
zil
szm
zal
yiy
ccm
cco
600
cee
asg
e-f
wie
eci
uph
tyl
mcm
kiz
opu
222
rup
ooo
gek
tuo
tua
koz
fug
sd-
eu-
dcs
ahi
msa
hut
i-d
ror
a-s
sef
whs
177
ttt
zoe
heg
klu
wre
yad
592
uco
ofe
xer
ake
wab
htt
mrp
ccr
yom
029
ary
lut
cse
mmm
kof
pek
tvc
mio
nuk
ics
52s
wro
upb
i-p
usg
jsc
jsj
hif
duk
edm
taz
ebr
avs
zao
fiz
fik
ssi
onh
hef
smc
fha
agg
sze
aib
epr
obo
ume
cae
51d
mrf
amn
stc
rhi
020
shl
dma
drk
isw
bap
unl
hbs
ayu
pax
pca
iko
tvt
lym
588
kud
nue
cnf
anw
037
jub
alz
aph
bya
112
myx
wot
xml
acl
bjl
bjt
itn
pds
yab
zib
szx
jax
ofs
lcd
gud
caz
ibr
aes
511
amh
yob
dtv
e-o
jsh
wip
lup
unb
kip
orm
koe
fum
chs
afo
raa
elg
llo
cnd
nlp
faq
icr
i-r
tcm
qat
byo
byf
jot
mey
nxt
osm
ssl
zol
gfx
ugu
odo
gcc
szd
vse
kry
inj
alq
szb
51g
ucu
cei
st-
sts
886
668
oxi
e-i
naj
023
yag
drt
mox
goz
qqq
gac
syc
lys
kwa
anz
p2p
cdc
arp
imt
rik
tcs
upm
2nd
soe
gzy
ezy
acq
bjd
aal
uga
evr
ihe
dba
zes
mvp
nif
waz
htc
eka
cah
51t
zad
egi
bop
rho
shc
lug
oti
upg
gyp
202
pcg
ikl
biy
bip
usu
wyn
esm
dcm
yot
bsa
icl
anj
pog
ige
efo
up-
dnd
at-
hae
so-
onr
onw
oht
scm
aam
ex-
cid
jat
akk
ers
aik
waw
ibm
caj
ibs
ghe
zah
eis
ps3
loh
as-
lir
ids
tma
cyr
cyp
beu
ngo
045
ean
hns
106
opp
zur
uli
nym
iwi
csb
lds
chl
mii
mib
adj
oci
occ
elr
jit
i-b
bba
dst
dss
upw
atd
qwe
olo
djk
zou
qoo
ohb
ohc
fta
lez
ctc
jah
erp
zei
tei
co2
nib
mtg
szf
spc
51h
51p
513
zaz
yid
ccb
ruf
idl
naf
nay
bek
upe
drh
drg
ovi
yem
ops
789
hds
cso
csf
okt
fud
iow
dca
enl
lla
121
pps
suh
3dc
ril
xpe
edw
hks
dwa
fia
mds
176
xiu
haj
haf
fob
azc
on-
ezi
vix
aan
puk
tsa
ptc
akr
zed
zep
51y
512
mra
sre
loi
rhe
e-w
adw
ru-
id-
wib
cya
bez
urs
wos
wok
kek
041
hnh
upr
drr
mcg
808
zui
hbc
ays
orn
gez
pab
pcc
mmt
gaw
syt
pem
sdf
vlo
iii
941
mif
ahl
mst
mss
g-s
cbc
nbc
i-f
i-n
eft
cdm
apn
apc
byw
skr
myz
frm
077
mbc
192
psa
cif
ley
zik
567
dbs
eru
erd
lae
teg
eph
umu
eks
ibl
516
zat
loy
cev
ncc
kag
kae
adn
jp-
syb
hie
jis
hnj
m-a
isg
tuk
kog
afe
da-
muk
esu
uba
enu
cnx
525
tts
4th
yus
jid
teb
3ds
hiz
rie
caa
usn
taa
azt
158
818
ipc
dji
axa
ohl
zgc
hwa
166
cme
cmc
ssc
aix
nid
niv
mtn
sph
am-
yma
rua
cs-
cd-
shw
shx
kew
eaz
102
drl
m-s
isn
isd
opo
rue
nyl
mof
csd
pci
mmc
sya
oem
sdl
cox
pib
52c
t-s
rov
sbc
izm
arn
rof
dug
byh
bye
whj
joj
hms
119
vau
hoe
mev
071
ipp
trs
aac
xal
zit
phl
wek
aiy
aip
gno
eps
obj
szg
szw
51k
zas
pud
nca
stp
kaj
adg
ees
ozg
ecr
awi
shp
jiz
hnt
aub
103
drw
ayo
nya
hdf
iku
ewi
dgs
chy
eun
sf1
sfs
ilm
kuc
rae
muf
369
xxl
icy
dys
fap
181
i-l
cdr
edc
tae
tch
myy
myq
my1
hoh
fij
mda
atg
qiu
vem
djr
djf
djl
uka
gds
ezp
bjb
ohw
smb
oda
lew
aec
iny
mcr
zem
uda
aia
umo
dxs
51b
ifu
asy
maw
kec
otr
qhd
dmc
nef
yeu
pvc
ayd
orh
iwo
gep
sba
lyc
iin
afu
580
ksh
adf
vps
en-
go4
to-
420
cni
kyu
ryo
juk
jut
fah
eds
taj
130
avr
jma
veh
ddd
acg
bja
ohp
cpr
cpu
xam
cio
leh
ziz
zie
xco
qwi
jae
ets
mhs
pse
aku
akc
tej
jxs
ilc
thy
obl
51j
51m
mrl
cch
yoh
pue
ifp
lix
idd
nga
125
lua
jip
108
wuh
is-
24-
oms
mcd
ula
orf
cqs
moy
mow
goy
hys
csg
pcr
bi-
sdt
sdj
444
tsi
eup
shk
ks-
vra
enn
enr
elh
cnz
roj
roe
deo
msh
msg
zjy
gp5
apu
ebl
ebs
tcc
hod
hoy
mdc
175
wob
smt
fom
azo
khu
gzb
fps
sse
djp
gdj
21c
tvp
bjg
ohh
cps
cpw
aad
aab
aat
ctr
szr
jaj
ofo
eos
ako
aio
szp
ca-
51x
lms
yif
yim
mrk
mrg
gwe
gwa
loj
mpa
ifc
svi
yaa
wus
bae
unm
mcl
mck
yeo
zeu
hdd
csw
okm
koa
cfa
gae
tvm
afg
tst
reu
mij
muh
dcc
ubu
kuz
oco
ocu
gie
tiv
roh
52d
ppl
qqm
pov
jup
al3
i-g
jsm
jsy
scp
cda
a-c
siy
fim
irr
wtf
azm
nxs
djo
bjm
bjq
scl
bhs
exu
crs
nrg
leu
cts
cto
jaf
uch
erc
hrh
emt
ilk
epc
bpo
obu
gug
guz
fbi
51z
zav
yib
mro
mre
loe
ffa
cef
ift
nci
880
ruy
cgs
nae
ec-
ecs
12v
128
aul
upi
do-
doz
mc-
mch
kif
zul
hbh
hbj
nyt
3rd
gew
hym
abh
pcd
okl
oko
zjg
gaa
cua
bif
sdx
sda
tvo
ogu
miu
njs
rc-
ubo
lbs
ocs
nub
366
vvv
eln
hss
ahc
gip
gic
52m
msm
iat
suf
i-a
efa
jsa
hia
tgp
xpo
fec
siw
yuz
133
byg
wht
031
av-
sk-
vah
dsm
atk
me-
bnb
soz
nxb
ss-
gzm
ork
ohf
cp1
xoo
ds-
xua
jeb
cti
zid
puf
xch
hyg
puz
unf
wev
nvi
cmo
erk
tsh
szk
guc
aol
510
mrw
mrh
mri
hva
cce
mpg
cey
stb
stm
889
e-j
cys
jir
988
a-p
bax
mco
089
fsh
3rb
orp
cqj
paa
pcf
okb
mmg
tov
gaj
sdy
chc
oga
sfc
ilg
es-
enp
clc
700
bst
tvr
tvd
hsc
yup
iac
anr
sst
cba
otc
faf
sbi
efr
cdl
arq
uuu
u-s
zzs
ixi
xpr
apm
taf
dwi
998
035
dls
jop
rpm
bbt
tco
czy
hof
hoi
foa
mbi
khi
nxc
gzl
sso
091
hcc
uks
saj
ezg
ezc
ohe
oh-
cpc
ugo
ziy
zic
gba
etu
erm
laj
zev
aie
nio
mtb
ghi
noz
51w
cc-
veo
hhh
ceg
liy
jpc
gsp
gsc
cgi
ruh
tms
cde
ies
gzj
cyt
woh
a-m
hny
hnx
bcs
upn
m-c
wul
gyr
sly
sls
kio
zuo
diu
hbx
oru
tux
paj
gry
hyu
qq1
gak
cud
bih
sdr
gco
eus
dax
ude
shd
gmc
enk
enz
och
ahu
52h
msi
ias
an-
dym
igi
izo
3dt
3da
hij
arv
yla
116
smy
xil
17s
iru
nxw
osu
ssh
ssd
jib
ezw
ezm
ezd
ohg
scb
nri
sxy
jcs
jca
zis
bts
x-t
zgs
mcb
akb
tse
coy
plr
mtc
umb
gub
eke
yih
d-s
yod
mpe
mps
osl
egu
iff
ncr
002
gsa
uts
utu
ect
568
b-b
luf
jiv
ots
a-l
bcb
hnz
wux
tyc
baw
086
tuv
hd-
yyy
ab-
qqs
ok1
ok-
pco
gaf
nsa
mkt
sdg
dda
iit
tsc
pih
sva
gnu
aij
im-
ajo
bsc
rst
ely
hsb
hsh
hst
tio
icp
gtr
yuu
huc
dyl
igl
faw
jsp
hih
swt
rir
zzy
edr
hkc
byn
whh
owl
vak
xte
dsc
aty
xis
irm
dho
olu
nxz
nxi
vei
tta
djh
onn
zod
21s
ddr
bjk
bjw
hek
cpo
vij
hzj
it4
jec
seq
mla
psd
sgs
szz
uci
ssb
nth
lcc
guv
v-s
mrr
ccp
ccl
ccf
osp
ifb
ncs
ncb
std
881
pwn
cyg
aws
122
dmo
dms
ney
qar
unp
unw
ung
slc
kii
kig
opr
fs-
ooh
maq
gef
geh
tue
bmx
pae
hyi
abn
jdm
csn
csj
qqc
868
fu-
pey
sdd
sdm
tvl
tvf
iis
afs
daa
miv
nhl
rao
bsd
nuv
nuf
mup
cnr
cnk
hsm
ahh
pms
52y
yuy
yuc
poh
ehe
teo
3df
gp8
gp6
gp2
778
cdj
duf
898
cfs
bdf
by-
xra
bb-
avd
bcc
my2
cze
hov
a2z
oll
azb
qic
nxe
ip-
gzx
jif
djw
onk
ezr
ezh
dds
ac-
918
ddl
bjp
ohr
cp6
cpm
smk
vii
858
jej
jeo
exs
seu
5st
pst
agc
fxs
jaa
gbc
btc
etf
ucs
tsp
ofl
xel
558
556
zea
zek
372
nts
hra
bpm
waf
caw
sps
tjs
51f
zay
yit
yiw
yop
fff
e-e
iya
jps
gst
jsf
idy
dvi
ibc
mga
kea
jha
jls
eam
dmi
wun
063
ukb
tym
doj
omy
unk
mct
mcp
fst
fls
kkk
orr
920
hdc
kma
pts
bof
hyl
abm
qq5
kov
ewo
syo
usv
lyd
afc
euc
bwi
p30
pga
udi
mui
iot
adz
njc
dc-
esd
nwa
345
sct
nhs
nup
nug
tvg
lle
jde
eja
yu-
shf
shn
huz
rma
fae
tef
s-a
izi
3dm
iea
arz
duy
hkd
sii
kbs
lvy
jms
cza
yud
mdj
xio
smr
xvi
bna
fok
azs
nxg
nxj
gzc
ssm
blc
wpc
gdl
acp
bjf
bjo
hej
ohn
rx-
199
itk
itz
jep
bhu
pdx
xar
vom
vou
trv
456
fxd
ucc
zgz
phx
ofm
ofw
weo
mha
inq
hp-
qur
gly
c2c
ils
epu
waa
mte
aon
51l
yix
ccn
ps-
tds
sro
srs
mpr
c-s
jrs
884
i-h
oxe
stj
gss
rhy
idu
idn
ulo
ngu
keg
jlg
ead
hnc
auf
ovo
zwi
unu
aye
oop
lst
925
mo-
hyt
ggg
qq2
qqb
okp
zjx
212
ozc
fuy
peb
sdb
lyt
ch-
eud
eut
ogr
sfi
sfa
gms
dcr
pii
piw
nws
ahd
b-a
pmp
52a
52t
msl
ms-
gts
eju
iap
huy
fcc
pob
oya
yno
3di
gp9
js-
jss
arl
urg
zzl
edl
dwe
135
whu
ebc
avu
tcl
owa
my3
dsp
vga
lpg
250
fod
soj
az-
azn
nxl
ufc
gzz
ssu
gz-
tev
ttg
mks
ukc
axo
ezl
ezb
mpi
acf
ohd
ohy
cp3
sc-
vif
hzs
jeg
se-
vov
zia
kps
dbl
uce
zgy
ofc
xem
wxh
cmp
ak-
x-m
ert
c21
epl
aoc
sox
51a
pne
x-p
cek
ifm
woi
iyi
hbg
hbt
idt
wi-
025
ecm
ecp
rs-
bej
awo
yaw
xst
987
m-t
m-b
686
tys
fnb
mcn
mci
opc
225
085
ule
orw
gey
geg
tuz
bma
edt
prm
dey
qqd
qqf
pce
zjs
860
vns
jbc
aed
nse
lyl
daq
mub
esb
rcs
buh
bua
scg
kug
vpn
401
ajs
aju
aji
rsh
bsb
icb
pyt
nnn
cno
529
msd
kya
mgs
eji
anx
dyd
juj
faj
nbs
i-v
gp3
cdd
cdi
cdp
iec
wou
rix
grr
whc
dlo
jow
bbo
bbi
avc
nds
vaa
txs
jmc
yub
mdi
dsa
xim
sjo
olg
151
khm
khe
nxr
nxy
gzd
234
ttc
jio
ukt
uke
fmc
sa-
gdc
ezt
ez2
cpi
smm
hzy
aah
ugg
crc
311
pss
cik
jcm
zix
gca
npo
dbe
ett
cvs
5th
zgh
zgj
zgl
ofb
vsa
eon
pti
cmi
zey
kij
ktv
teq
yep
ilp
bnt
obr
mta
kvi
szn
noj
zaf
yiq
yip
uva
1ma
ccd
ccg
amv
dzi
c-a
x-s
egr
stf
wog
3gs
eep
tmc
kcs
1to
gzw
gzt
022
cym
566
mgm
luz
yaf
zys
a-b
eal
sjc
jjc
tyn
un-
unr
mcf
mcu
yeh
fsc
fsd
dij
tnb
ayt
oot
flc
zna
zsh
cqh
cqy
hyc
aby
csu
qqk
pcl
mm-
koy
ko-
nsi
sdw
sdp
sde
pma
sih
ogi
sp-
muj
njd
njj
njo
njl
dct
rcc
buj
eny
ocm
nuo
hs-
cnq
hsd
ths
ahr
msb
ppm
yut
yuh
huf
fcs
t-r
pta
vma
izz
aa-
i-e
jsd
cdb
hiw
arf
duh
duz
5ai
zzc
bdc
hkm
byu
byk
nfs
whm
dla
bbe
mfa
skl
sk8
owi
xta
dne
jme
mde
smp
lps
wto
sjs
dhs
mbe
azr
nxu
nxv
nxf
vex
zri
gzf
099
saz
yst
211
gdy
gde
gdi
eza
ddi
910
bje
cp8
cpl
scw
mne
xoc
xon
xom
it0
bho
exb
tpa
xat
xac
sxt
jcb
jco
jch
lej
cte
aei
qtr
etn
579
rvs
inz
in2
pto
zec
nti
aiz
nii
bpa
ums
spm
ghs
mrj
vur
qil
zgg
eir
eid
src
mpc
hhs
stt
lih
liw
rha
wof
iyo
gmt
bov
gsh
syu
jns
bex
mgr
xsd
a-a
hnb
hnd
aux
hch
neh
mej
drv
m-p
wud
zwe
isv
066
ofp
mce
slt
yek
oph
vdo
220
fsf
boj
nyp
nye
828
bmi
hdm
oin
hya
prc
okr
okn
pc1
dg-
syj
dgh
rgv
lyb
ddo
wzs
eum
eug
hr-
req
ksp
cjs
ior
esq
dch
dcb
rct
bup
rre
4x4
aif
muy
hsa
aht
pmi
528
52p
msu
mse
ic-
ppi
yug
iai
huu
prz
rmt
igu
nbd
nb-
3dw
cdx
iet
iel
ar-
duv
upu
edh
apf
vca
655
whx
jov
115
113
bcl
woy
fip
17w
foi
azz
azp
155
nxp
nxn
nxh
ipb
jij
djg
ukp
zot
ytb
gdf
cpp
scj
scd
zmd
smd
pbs
hzh
it1
ftc
jez
dfs
odu
aag
nre
pso
ciz
jcc
agm
ctt
sgt
xce
gcl
aeg
tso
578
zgb
phs
ofa
160
aky
akh
ery
ern
uan
zeg
al7
nta
nte
tey
hrt
bni
xys
wao
mtr
mti
mtm
kva
ibn
aet
51e
yic
vue
vun
rwa
yo-
mp4
wmp
c-c
ffi
uve
ifd
asv
nco
woc
jpm
jpe
jsb
ruk
utr
ozd
syy
urd
fjh
shq
dpa
e-z
kef
jix
ukf
syg
hng
10k
drn
wut
wuz
24s
24x
isy
jja
tyt
sl-
zuz
224
zzt
hbb
fll
3ra
imr
bms
hdp
pah
cqt
hyo
wsp
qqx
hfs
pcn
186
kow
dgt
pew
pej
pef
nsp
sdn
tvi
tve
daj
ogl
oge
sfo
ysa
nja
njf
dcl
dcf
dcp
scx
kui
tmi
enh
enm
raq
muv
hsg
ppa
tix
pmc
523
kyr
ppp
ppt
ejo
yux
ssp
dya
rms
juv
g-m
uwa
jic
iza
i-w
cdn
ied
hiy
zzj
zzh
zza
xps
ibd
cfc
hkb
usj
fev
feu
fey
sio
kbc
136
ltc
whf
990
dl-
avm
tjh
mfg
tcp
nde
vaz
vav
bco
lvs
txd
atn
nez
wts
ir-
hax
fow
azh
qib
qis
nxa
nxo
veb
ipm
ipu
090
tti
jik
fms
zog
917
acd
heu
hez
mdp
cp-
scy
smg
xox
itj
hzl
gyn
313
knu
sxh
sxs
jcw
ags
ctm
sgr
ihu
gbg
bti
tsm
bt-
ofd
vst
tna
erb
hrm
emg
emy
gne
lca
mto
51q
qid
tls
qiy
amj
yoc
wmc
mpl
jd-
yse
uvi
ka-
lij
669
oxo
ady
hby
003
naq
nax
dvb
hi5
tew
jnj
pvp
rsc
cyl
shg
luy
kej
yaj
zyb
zyn
265
980
jly
jlc
043
eau
105
qdh
wuy
jjs
ukd
omb
sws
xjy
wwm
zub
hbd
ayn
oos
nyb
141
ls-
788
kmc
ius
hye
011
hyj
eyo
jdz
qq3
qqt
qqw
qqy
okd
nct
sb-
zj-
zjl
zjh
jbs
jba
cuf
cuo
nsc
sdz
tvn
xbl
ksc
tjd
ios
dco
dcg
rco
kue
thc
mue
368
xxs
elw
ywa
cnu
ro-
866
52w
52x
msr
gtc
eje
yuw
iar
huk
anv
pr-
ttd
ssg
dyb
501
uhd
juy
cbr
j-m
tez
gp7
gp0
cdh
zzx
a-t
edp
bda
hkh
feg
feb
132
137
ltd
tps
189
dlc
dll
bbl
bbg
hmo
tjc
tca
117
vaf
lve
bca
fuq
052
dnb
jmj
czh
mdr
399
prp
dhi
dhe
foy
mbo
nxx
vea
hcs
hcg
yzh
wpa
ezn
ezf
ack
hev
cp7
smf
197
191
xor
klm
jei
aak
exh
exx
xad
iji
wzj
jct
agl
trc
evs
leq
mjc
gch
fxb
dbo
aea
bto
lg-
tsb
zgm
phd
j-s
gls
akd
qzo
ntt
jxy
em-
hrs
aiw
aih
niz
plc
xyd
xyl
xym
mt-
kve
3gm
aob
aoo
aor
noy
zax
wco
jtf
eim
tld
sra
yoi
c-m
ceh
ifw
asn
oxf
jpl
jpo
jpg
gs-
3gt
hbw
rhs
cgm
idm
idb
utt
tmd
kcc
kch
ozi
ozt
jnh
rsv
6rb
urp
keh
yav
otu
xsp
dmg
107
drj
jpa
qds
m-o
ovu
jjj
gyo
tyd
tyg
388
169
yey
wwt
wwa
iqu
ayi
ayb
flp
nyr
bgs
nyf
140
xli
ory
gev
imf
bmc
hdv
hde
na-
abt
abz
jda
pck
okw
okg
mmx
mmp
mme
mmd
fup
pex
sdo
sdk
lyx
lyf
lyo
lyj
chm
chw
eua
581
qun
udo
sf5
sf3
kst
wym
njb
cls
ajm
rse
bse
361
nu-
mu-
elj
gij
icm
52b
mso
msk
kys
gt-
yuv
ial
iad
iaa
hup
dyt
dye
juh
cbi
faa
616
lhc
s-d
gpt
gp1
jsr
arw
rxm
u-c
4my
xsc
xpl
cfp
bde
hkp
ouy
xre
whd
ebd
dli
dlm
bbm
bbd
bcn
txc
wop
mdm
dsi
dsd
dse
dsb
172
xib
xid
wtc
256
gxy
pry
dh-
olm
fop
mbm
mbs
159
nxm
gzk
gzg
tty
ssk
093
zoc
xmo
xmt
wps
gdp
gdr
gdt
210
915
bjr
cp0
cpt
mno
sml
pba
vih
hzc
jjb
97s
exd
oja
878
psf
cih
jcp
trt
ctp
ukg
ziv
2se
fxg
dbd
775
aee
lgd
rbs
x-b
cva
zgr
ofh
wex
yri
wxs
wxc
in4
w-c
cmb
erw
550
jxj
suj
aiq
ofu
pls
obx
mtl
szi
aok
aot
nox
lmc
zae
zau
mrv
ccw
ync
nma
wm-
c-o
c-t
hhe
cew
pua
dts
660
e-u
jpr
jpt
3gp
3gw
005
gse
gsd
eem
eec
cga
bgc
bgt
ayy
aqa
tmp
uls
ycs
yco
ozm
bfs
hla
eck
ecz
b-s
nge
ya-
jlb
xls
eab
10-
688
wur
wuj
mxd
24c
baf
omr
omu
801
806
203
zug
fss
jhs
ruo
ayl
ool
lsp
orz
zsc
cqb
hdh
paf
yyt
zq5
hy-
4ev
ggs
csy
qqp
qqz
okh
mmi
zjk
prs
cfl
iks
fuu
fub
bii
biv
peu
nsf
nss
ns-
zhy
tvh
lyh
vli
cht
iic
afm
cws
miq
miw
ksm
vre
uin
iop
gmo
adk
njp
esg
esn
dcd
tjy
zdr
kuu
enb
bsm
nua
jyu
cn1
giu
icd
icq
52e
ryn
dyc
juc
g-o
cbm
t-b
t-h
nbt
nbh
nbb
suw
su-
lhs
s-c
izu
jsl
jsw
jsz
cdt
imv
qal
qan
899
u-p
yli
xpa
edv
ibt
usk
f-s
whb
991
hmc
tjt
ndo
xto
q-s
054
dnn
vow
wsf
czt
mdo
syh
xit
xix
xiy
sjh
ha-
nxk
3st
osb
ufi
yde
233
ttm
tte
djj
dj0
hco
uki
zop
zoz
yte
yzs
gdb
woj
919
dde
91t
pys
cp9
cp2
cp5
mns
mnc
pbc
xuy
hzd
trm
hz-
trb
tsg
aaw
ugi
uge
sej
mle
mlo
voe
vok
vop
psl
cii
955
jcd
agn
ody
ctv
ctf
sgc
pfa
gci
m2m
ihs
fxc
dbc
cvm
xed
xee
xeo
nvr
ts-
vsp
yre
hpi
eol
cmd
cmy
552
4yo
ntc
kte
jxz
wzh
hrc
emf
lcs
wa-
mth
szo
guj
htb
ekl
jau
uzu
aod
aos
aop
tj-
519
zaa
zaw
yie
yig
yik
wcc
a-v
jts
eit
d-e
nms
wms
x-c
jra
jro
egl
ifg
a-1
ncl
stv
885
663
662
661
ymc
e-y
idg
uth
rte
syk
rts
yca
4sh
nyw
jne
jnl
hld
ecf
snt
awf
urt
tbc
1-8
zyx
263
k-s
xsh
a-g
jlo
n-s
bcp
aum
10d
nej
109
drz
qdj
m-r
mxs
omc
omf
161
kih
opl
wwp
228
227
226
fsa
fsm
dii
diw
nyu
nyn
nyd
kk-
tbi
wny
tu-
imd
786
cqd
qst
hdr
hdn
ois
kms
zq1
1ba
gox
gg-
abp
twc
jdc
qq8
ok0
pcw
oky
mm1
sbu
zjc
zjw
187
vne
fua
dgc
jbm
bix
pez
ufa
mk-
lyz
ddc
gcp
dd-
eul
lfs
cwa
qus
585
sft
sfp
kss
ksd
uit
ilt
wyc
fyr
esr
dci
rcp
rca
buo
ubc
wds
f-i
aje
nhc
lbc
pkp
rsp
xxy
xxh
cnv
ahj
ahw
tiz
52l
52z
tbs
ryb
gti
ppo
ryt
iaf
fct
dyk
3ba
juz
g-l
g-c
g-a
cbt
t-e
t-m
oyn
yns
3dp
3dg
3do
3d5
3d3
jso
jsx
cdg
rta
ieg
iml
ie-
uus
riy
h20
zzb
4re
711
cfo
cfi
bdb
bdr
hkt
usy
sij
byi
oui
vcl
030
rpa
rpc
bbf
avh
avg
my0
txm
czw
dsh
dsf
syx
xih
17t
vgm
lpc
lpa
gxs
074
dhm
sjb
bns
bnc
olv
ump
foe
fof
mbt
azl
150
qih
156
vep
811
ipg
ipv
ipw
ttb
djn
dj9
ukw
fmp
axs
xmx
yzy
tv2
acb
cph
mnt
sc2
vi-
kly
dfc
itx
ity
hzt
it3
aao
aai
sez
xax
xav
psh
voz
psg
i2i
ciu
ag-
qud
agb
jc-
agt
evc
trp
toz
cta
ctb
332
npc
kpa
fxp
mjs
ael
bte
btm
bth
tsl
etv
ucb
x-g
tss
cvc
rba
zgt
zgw
pht
xet
mho
zyd
vsi
yra
cmm
ptr
brc
la2
laq
zex
371
hre
emd
coj
aii
bpi
wae
3as
guf
hta
aoa
aoi
aoy
tjm
51n
yij
rsm
rso
vud
d-l
ucl
tda
yow
3-d
wmm
c-b
x-r
jrc
vek
hha
ffl
dih
ncp
nch
dtr
882
887
li-
ymh
ymo
stg
jpi
009
gsu
gsl
cg-
cgt
rti
ozb
ozl
ozz
vbc
wix
jnx
hls
ecn
rsa
fjs
fjw
dpr
dpi
ttl
luu
zyg
012
zym
989
a-z
hnf
hnl
hnw
bcr
auk
hcm
wub
zwa
xur
ukl
gyt
dox
bav
omp
unn
kiv
iq-
223
083
081
087
di-
jhe
jho
bo-
hbz
ayg
ayc
fl-
921
927
tuy
tuq
imh
hdw
hda
hyf
hyn
hyr
abq
ggw
ggi
ull
jdw
jds
ryc
okk
mmb
cfe
qlx
861
18t
vnt
215
gdw
gah
syz
jbl
ypo
peh
mkm
sdi
rds
ii-
91f
gcs
qre
qro
sf0
sfd
vri
ild
gme
fyi
nje
njm
njy
njx
esw
buu
piu
oat
pi-
nwt
nwc
mwa
kuh
clf
eqi
bsi
pks
stn
kwo
jyo
hsj
hso
pyl
urn
ti-
llc
b-l
pmg
524
526
527
52j
msj
ttr
kyn
kyd
ryd
ppg
gte
jwe
fcp
rmb
wbs
wba
ehs
ehi
cbd
rvi
hwy
t-d
nbp
suy
yni
yna
s-p
ynt
3dd
gpo
3d8
i-i
776
771
cdk
uuc
tgi
imy
zzf
zzd
ylo
a-f
grs
grn
01s
cft
bdo
bdt
bd-
047
spt
si-
oul
oug
zxs
ybo
whw
99w
99c
99l
wst
ebb
033
dlp
dlz
dlx
avn
kda
ndr
vaj
vab
vay
xti
966
txh
woz
czj
mdl
syf
smx
kfc
irp
258
frc
rgb
prn
070
dhl
sjl
bnr
mbr
osk
wmd
ipd
ip1
gze
ttw
sd1
blg
ukm
zos
833
839
zhb
hxy
gdg
gdm
gd-
tvw
7la
916
91d
heo
rym
cpe
mni
mna
xot
xol
851
sm-
gfs
trd
bhi
hgh
exf
exl
ojo
cr-
crt
xas
xah
xaj
877
psm
knt
ije
jcl
agh
agp
ueb
ctw
ctd
335
xca
2sh
lzh
455
dbi
dbz
gbs
aen
btw
bta
ety
etl
571
575
lgl
lgs
zgx
unh
353
gxc
krc
jzj
jzs
wxt
pte
cmg
glb
xga
mvc
ktm
lct
bpr
xyr
xyx
xy1
mtp
3al
umm
szq
ht-
ekt
fba
spb
aoe
aom
51v
5is
vut
vus
qim
tlb
ccv
cck
psc
nme
srp
srt
loz
wma
mpo
1pa
pui
c-l
hht
ffs
ifn
ifh
nce
sth
yms
tfs
oxa
jpd
gsf
766
eer
770
bg-
cgo
cgc
cgp
kca
kcr
ozk
wiw
wiv
xph
702
cyz
rsf
swc
fjj
urf
mg-
12-
dps
keo
yax
mzd
zyk
zyt
wxy
k-l
jld
jlm
ubr
n-e
dm-
eap
dmv
hne
upv
qdc
yge
m-m
24p
gys
tya
389
unv
slm
80s
vdi
zup
a1c
fsi
jhc
tnd
hba
oon
ooz
flt
nyg
lsd
lsc
lsa
xl-
xle
826
zss
zsj
a7l
tuh
imn
cql
cqc
pa-
zla
kmg
kmo
yys
abf
ggt
ggo
ivf
eya
eyl
qq-
qq9
csx
qqa
pcj
pc2
sbo
zjd
ikn
ga-
fux
fuf
sy-
rax
dgj
syp
cug
cuk
qwa
30d
peq
305
ceu
zh-
chh
iid
gct
euf
cwi
sf2
sfr
sfl
sfe
ksk
xda
uic
tje
cjm
iou
4ca
wyt
ily
gmi
njw
esy
tm-
565
piq
nwb
clb
wsh
wsm
wso
go0
bs-
bss
bsn
ocl
nuw
362
zbl
kwe
jys
jyl
icf
hsp
ahb
gik
sqw
llt
lll
icw
pmo
pm-
52f
1se
msy
xzh
ppb
vta
gkr
dyg
yho
juu
jux
g-b
cbe
cbo
t-w
t-a
t-c
nby
sbm
s-t
s-s
s-m
s-k
wla
3dl
gph
gpa
dfi
efs
efl
779
cdw
qle
iep
bfi
iee
rih
qas
u-t
wov
2da
grm
01t
cfb
bdh
hkr
oub
vco
f-c
whl
99p
99b
2bi
2be
ebt
jo-
rps
bbh
bbr
avl
avb
tcb
vae
jm-
dng
dnm
dnp
czx
czl
hoj
cz-
mdh
dsw
dsg
17m
17g
17d
699
irf
072
dhu
sjm
sja
bno
ols
azw
vew
vej
ydy
235
tth
tt-
dj1
uku
fma
rmc
zof
zoy
axx
xmy
zhh
wpt
1co
gdz
21a
21m
ezo
ddt
ddm
91x
cpf
scn
zma
pya
yxz
hzb
hzp
kfa
ftv
swg
dfo
crd
310
se7
mlb
mli
voa
876
psb
rdo
iju
ija
fzs
ci-
cix
sxd
sxj
sxm
jcr
jcj
qvo
tr-
evl
598
ctn
sge
mj-
npa
gcb
ihr
ihi
4ba
szu
ae-
db-
gbo
aem
aeo
btp
btl
lga
cvi
zga
ofn
xei
xes
wez
nve
wxj
w-a
cmt
7th
uas
zef
zez
pja
ntr
nto
kta
teu
tsd
hrd
wz1
coq
thw
epm
rna
lci
obb
xyb
wau
fbs
aow
ghd
yi-
wcp
wcf
mry
yto
ytj
yth
3co
vuo
qik
eil
ccj
d-i
d-m
amk
nmc
dzh
srv
lox
mpd
mph
jre
jrb
hhm
hhl
uvm
tfl
tfo
ncm
ncf
dto
dte
lii
qba
667
yme
woe
jph
006
gsi
eed
eeg
88t
cgl
bgb
fda
tmt
svo
dvr
rtc
rtr
wia
jna
jnm
sng
hlb
snc
yro
snp
pvr
ppe
b-c
b-m
12s
mgo
yau
jiy
zyc
985
jle
jlj
swf
dmd
dmt
hna
bcm
hnk
up2
hci
10s
qdd
wum
jjw
gya
gyu
baq
glj
mcw
ki-
xj0
kiy
xjd
809
xjs
yed
yeg
yez
221
fsl
fsp
fsy
jhy
hb-
tnc
hbm
hbo
hbp
hbr
ayh
oob
nyo
lss
kkt
kks
xlm
822
jfk
jfs
cly
bmb
bme
hdl
oic
oit
zlo
zli
kme
kmm
yyh
hyz
abg
ggf
902
tws
qq7
csk
csz
bkk
bks
cs1
okf
mml
sbe
zjz
koj
rrr
vna
214
dgd
dgl
dgr
biw
biu
309
303
nst
nsu
bcf
lyp
iib
9ai
589
pgs
ogo
sf7
sf-
325
sfh
2pa
1de
1do
ilb
cjr
gmm
288
gmp
fyt
tjl
ad2
nji
njr
tmg
dcw
rcl
rcm
oan
nwp
kuj
kuf
xfl
f-a
404
wse
ra-
pku
mux
muo
363
elz
hsi
hsn
hsx
ahg
ahk
ahy
nna
tij
nn-
pml
1sh
kym
kyc
kyb
xzj
127
rys
qqh
fca
tkd
tka
dy1
h-s
ssz
ssw
dyw
ssq
dyr
505
uen
wbo
juw
g-h
g-f
igs
t-l
j-b
fao
nbx
nbf
sbg
sbd
yui
sbk
sbl
oye
ynh
yne
s-f
s-e
4pa
wls
3de
3db
i-o
773
jsk
cdf
cdo
cdy
88b
iem
ien
arh
uup
tgr
892
897
yle
a-d
rri
hke
apb
ap-
siu
vcd
vci
zxc
ybe
99t
99s
995
dlg
dle
dlh
dlt
dly
joz
bbj
733
bbp
avv
avp
mfc
tct
tcf
skp
skf
ndc
lvc
lvl
my5
cfd
xtu
xth
960
004
txg
txb
dnc
jmi
jmo
jmb
czm
czb
czc
hoz
fi-
mdt
md-
xig
17l
17k
17b
nzb
gxh
sjy
fc-
jkb
sjg
mbu
azf
azg
157
nxq
osw
3sh
vez
oes
ydi
ipn
wvc
232
tto
jih
dj5
blt
bls
blk
fme
fm-
qor
xmi
xmc
xms
yzf
yzj
hxs
hxz
wpi
218
gdo
tv1
bjn
ohk
uis
cpb
cpd
mnm
mng
pbt
xos
xop
kll
pyc
yxt
gfi
2ho
ftw
fti
xic
jek
dfg
aaf
aau
aaz
exm
312
xao
xab
xag
nrc
mlc
vob
sxl
sxp
sxx
sx-
337
sgm
sga
sgb
zij
pfe
pfo
mjm
vms
lzc
kpo
ruz
459
fxl
dbb
dbr
btt
etw
577
lgb
zgn
xev
xez
wey
mhc
vs-
jzh
jzt
hps
hpt
hpc
eor
brt
brs
erh
551
uwe
pjs
mvm
kt-
kts
yps
jxt
jxw
jxx
jxc
jxb
jxg
il-
aiv
nij
niy
lcm
531
ply
plm
obc
obt
waq
mtf
umc
3ar
umn
guk
eku
uze
aou
yir
pnp
pna
4us
wcr
jtm
yti
gwo
gwi
vui
eie
tli
ps4
d-a
d-c
d-r
d-t
d-w
u88
dze
dzy
rjs
rja
lo-
yof
yoa
wmi
wmb
4pr
c-h
rbl
c-p
ffe
ffc
if-
nc-
ncn
ncd
o-s
o-r
tft
sdq
ymi
66s
adx
tw-
3ga
3gg
jsg
ruu
bga
idp
twl
pws
svn
svs
tmb
tme
kcm
tmo
ycx
ych
vbe
tpi
4st
jnc
jnb
ecd
ecy
cy1
sny
cyw
cye
f1-
awt
awh
fjm
urm
fjx
ngc
ngn
ngh
mgl
zyr
268
k-m
a-r
jli
jlh
n-g
eaf
dmb
dme
hnp
auo
aup
10m
qdl
xhy
ygo
681
wuc
jjt
jjm
jjh
jjd
gyd
gyc
064
tyo
tyh
ba-
slr
kiu
xjt
803
yec
qzh
wws
wwg
wwl
wwo
zuc
iqs
a1s
fse
jh-
pvd
ayx
ayr
oom
oor
bgm
xlc
zsf
20l
ptf
cqm
cqg
bmp
hdi
ptp
vhs
iun
gga
twt
jdh
deq
jdt
jdr
qq4
bka
qqv
pcu
okj
pc0
sbr
sbp
mmy
sbt
mmh
zjf
cfm
rrc
fuh
spd
ewh
dgm
jbr
oel
nsb
nsg
mka
tdi
vle
ddb
ddf
gcg
gcm
942
tsj
tsf
58s
qut
bwe
pgc
pgi
ogd
sfg
sfb
vro
tjb
ysh
ysb
wys
gm-
ad1
njg
njt
rcr
rcg
oah
tjx
nwo
mwc
kuy
kuo
wdw
iub
clk
mpm
go1
nhm
thm
ocr
pk1
nuh
muq
wh-
xxt
wfb
wfa
wfs
hsf
hse
hsz
pym
ah-
nns
tiy
tii
icg
lli
b-r
52g
msf
kyt
xzl
vte
gth
ppd
gtl
gto
pph
qqj
dpl
jws
yuj
yuo
hux
fc6
anq
uya
fcm
nlc
h-o
h-b
7se
ssy
dyh
dyi
lnl
rme
509
wbb
wbm
1lo
ehu
g-p
cbf
cbn
cb-
t-t
alx
j-c
j-f
j-e
nbz
nbj
te-
sbf
611
s-o
ize
izh
3dh
gpf
i-k
cdv
bfb
hio
arj
tgt
xss
677
xse
yly
ylc
xp-
edb
edf
edn
ed-
hki
hkf
apy
fef
fez
taq
p-s
kba
byv
byj
nfa
ouc
oua
oud
vcc
lte
f-e
ltl
yba
99h
dlb
jof
tjg
r-a
r-e
mfo
tcg
tcd
kdl
kdo
11k
11s
ndi
nda
ndt
bci
bcd
my8
bct
968
058
056
dnw
tx-
jmh
jmg
czs
mdf
dsr
atj
170
17x
17r
17p
nzs
sj1
irv
prb
frs
prt
prw
tzy
sjt
tzl
dhc
tz1
jkd
olp
qiq
815
gza
fpc
ssr
djz
798
blb
onj
onv
zoa
zoi
zow
xmj
831
uou
vub
uoo
wp-
ivs
wpf
1cl
gdh
gdk
21p
acy
91s
pbx
196
qme
pyn
kls
yxs
yxw
yxc
itq
hze
hzx
it5
gfa
fth
ftl
jev
dfa
aae
aav
97a
aay
bht
exq
crw
pdt
xay
xaz
xaw
voj
kns
sxz
agf
qvz
trk
odr
odl
ctg
ctl
xcl
xcr
gcd
mjb
vmo
2st
lzs
toj
ihc
juf
fxt
cka
ckc
fx1
cks
dbh
dbp
dbt
sz1
gbl
ucf
x-l
cve
ph-
zgf
phc
ofg
ofr
351
350
ofy
nvs
jzx
3ma
wxx
hpv
hpa
w-i
ptm
ptg
ssv
glh
glc
akm
brz
brm
uac
55s
uwo
zeh
nt-
mvs
ktp
kto
jxq
jxd
ype
jxh
jxn
hri
hrg
ilv
lcp
rns
bpc
bpl
536
pld
obm
xyt
xyk
htd
hts
htp
fbm
fbl
spp
dxc
dxb
spg
ghg
aep
wrs
lme
rls
514
yiz
rsl
wcs
wca
wcb
wcg
mrn
jta
ytc
gwy
qij
tlt
tlg
d-o
d-p
psr
nml
sru
loq
wmr
wmw
wmh
wme
c-n
jrd
puu
egs
ffx
ffm
ffd
ff-
jhj
st1
tfm
dta
stx
883
kax
88l
664
wo-
ypi
gs1
3ge
gsr
eet
88s
cgf
fdc
fdi
tmr
dvs
rtm
rtl
kco
rtv
yce
vba
4sa
jnt
snb
hle
rsd
rsg
rsb
cyo
ur-
awm
fjo
fja
urr
12c
b-i
12p
b-p
sh3
mgi
ke-
kez
yae
mza
zyw
k-f
k-t
4me
k-p
xsj
xsi
jlu
jlt
n-p
eai
eav
dmp
724
aue
upk
hcl
upy
10l
10c
10t
qdr
m-e
vfa
682
wue
24a
jjf
jje
068
ty-
tyb
omt
mcj
slw
sld
glf
xjm
xjh
yeb
wwf
wwi
a1r
a1l
fsj
fsw
jhm
7ca
hbi
hbe
ood
oog
uat
flm
bgl
bgr
snj
snk
148
kku
kki
xlo
ua-
zsp
zsm
208
tuu
tuw
imk
bm-
bmo
bmt
hdy
hdb
oid
kmr
kmp
kmy
km-
yyx
yyc
yyb
yy1
zq8
wsa
hyw
ggl
eyu
jdp
bk-
qq0
csv
hfh
ok5
mmr
zjt
zjr
pt-
867
18m
18s
vnn
2th
ikk
fuw
963
dgf
dge
dgx
jbb
cuz
cux
aeb
30m
oeu
oei
ufu
nso
mkh
sdu
zhs
zhx
zhg
zhj
lya
chd
chk
chp
iip
gcf
euk
tsr
lfd
lfc
cwe
cwg
qul
uds
sfm
ksw
kse
ys-
ysj
ysi
cjf
ioo
iol
gmd
fyo
fym
gm1
nj-
rch
rcd
rcb
rxc
pij
ubt
ubl
nwr
nwl
nwm
scf
scv
gnc
kuv
wdl
enw
clt
wsc
ajp
nhb
nhi
nh-
eq2
lbl
lbo
90s
bsp
bsg
thb
nui
kwm
xxp
wfc
vvi
1ho
ywc
hsw
cn3
cn0
pyb
giw
ah1
ick
12a
pme
sh1
52k
52q
msw
kyp
rye
ejs
yuf
iau
huv
ia-
fc1
7da
fcl
""".split()
# 8000 length-3 prefixes: scores run from 8000/16000 (= 1/2) down to 1/16000.
for index, name in enumerate(POPULAR_PREFIXES[3]):
    PREFIX_SCORES[name] = (8000 - index) / 16000.0
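# A minimal usage sketch (hypothetical; `_example_name_score` is an assumed
# helper name, not part of the original module): score a candidate name by
# summing the scores of its leading prefixes of every tracked length.
def _example_name_score(name):
    return sum(
        PREFIX_SCORES.get(name[:length], 0.0) for length in POPULAR_PREFIXES
    )
# e.g. _example_name_score("game") combines the scores of "ga", "gam", and
# "game" once the length-4 table below is populated as well.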
POPULAR_PREFIXES[4] = """
game
free
love
shop
chin
best
info
club
team
just
real
home
easy
live
gree
good
link
host
star
blue
book
your
news
life
play
auto
plan
porn
blog
idea
find
medi
king
city
mark
deal
site
open
soft
card
fast
name
hand
part
time
code
data
safe
inte
show
film
ever
thin
sale
tech
bank
call
post
face
pure
food
sell
girl
euro
stud
fire
trip
true
miss
cell
care
bill
land
shar
jobs
full
talk
mail
file
body
supe
loan
mind
port
help
meta
cool
long
mobi
only
vide
save
read
high
next
look
fish
gold
west
date
dark
fine
cash
park
nice
lead
mode
over
well
hell
more
baby
what
musi
hote
clea
wild
lady
poke
hair
page
luck
test
digi
east
text
list
will
comp
back
sexy
view
vote
line
spor
even
band
drea
room
hard
last
wall
radi
powe
them
movi
join
bear
form
mega
rate
sign
char
fair
mone
core
asia
glob
sure
note
down
tree
pick
spot
phot
indi
fund
mini
flow
pack
farm
unit
happ
seek
rock
ring
mass
rain
hear
boat
teen
stor
worl
make
stop
blac
work
clic
take
race
lets
loca
come
thes
deep
head
move
know
imag
gran
feed
desi
chea
road
trav
ball
drop
very
plus
foto
scan
foot
john
cent
pink
theb
visi
need
main
disc
smar
lost
ship
trac
send
grea
grow
hous
meet
serv
chat
bran
base
four
wine
moto
area
anim
cafe
like
wind
chip
ther
this
pass
dail
flas
styl
vita
copy
the-
prim
give
tune
edge
cold
jump
step
anti
lock
hope
mari
keep
grou
foru
want
used
half
ride
trad
tran
zero
hack
dead
tele
pain
feel
prin
door
golf
cybe
magi
that
town
appl
user
side
cost
camp
self
gift
firs
thec
flat
stoc
heat
kill
some
rent
case
sola
fore
quic
five
desk
hunt
turn
chan
peak
onli
pool
aqua
root
boot
word
wiki
chil
huge
gets
tota
byte
phon
push
fact
pres
tell
wear
kids
joke
lazy
para
here
fres
thep
mine
cars
prom
ange
then
walk
lega
type
adul
thai
stat
wide
made
most
spee
nine
stay
firm
roll
nano
fina
task
pipe
clan
turk
draw
dear
webs
spac
arab
thea
warm
surf
grup
bulk
pric
item
keys
hill
colo
wate
crea
slow
rest
craz
logi
able
rare
late
loop
casa
viet
theg
eyes
simp
flag
bike
rush
must
busy
load
many
watc
eart
goog
nort
heal
edit
whit
davi
into
mile
bell
rich
diet
abou
clas
hang
micr
fall
extr
cure
buys
cart
elit
mike
junk
chic
sear
web-
doma
tour
valu
disk
theh
metr
paul
truc
rapi
ones
sara
gain
cand
have
risk
path
alex
hall
fort
kind
spar
cine
wash
past
urba
arte
fran
buil
week
shin
sand
nets
yoga
trus
paid
funn
jack
days
glas
tank
less
marc
geek
ener
trai
yout
bets
ilov
thel
hits
ston
goto
thef
nova
poin
soun
inde
arti
lear
suit
iran
thed
ligh
roya
plug
leaf
swee
lift
ente
fuck
smil
comm
blow
wise
shel
army
hors
suga
audi
stre
amer
bits
orga
coco
mean
term
poll
wire
acti
same
inno
carb
thet
keen
herb
bite
near
hold
ugly
arts
smal
driv
kara
cred
they
song
cros
bids
robo
chai
left
sexs
bars
ease
dirt
prop
seve
prof
wood
slip
carr
stan
virt
cove
obam
bang
cana
hide
chri
came
skin
unde
andr
terr
cons
shan
enjo
youn
gene
eco-
nigh
pape
casi
from
sens
poli
quit
odds
glad
rose
mont
touc
astr
matt
mast
adam
than
shor
hate
logo
drin
pers
suns
guid
solo
dare
inve
vega
sing
opti
thre
thew
alls
each
soli
folk
basi
fanc
voip
fail
elec
brea
shoe
fill
ipho
sex-
sout
lend
tops
brai
carp
carl
dumb
mult
stra
tabl
gogo
righ
mana
pull
aero
arch
done
beau
mess
flor
pixe
moon
sold
mang
theo
cont
soon
maxi
viva
huma
soul
tian
pros
much
rive
vast
prox
ital
snap
hero
peac
ours
natu
deco
sees
tren
size
dire
phil
lati
dogs
bigs
isla
reds
catc
ultr
nett
bigb
buzz
cele
indo
bett
mart
hots
flex
deli
dell
orde
dian
hour
feet
figh
coll
demo
drew
laws
alph
blin
hole
tang
kore
bugs
eric
tast
thei
alma
tool
stil
barb
java
brow
tube
stic
beta
rise
slim
tape
cara
afte
tiny
texa
equi
pets
agen
offe
cycl
mand
hott
twin
xiao
expo
taxi
intr
anal
toda
dump
ware
seem
pete
matc
offi
1000
scho
cosm
sext
alle
dave
prot
snow
net-
slee
hana
rand
clip
inst
alfa
alba
teac
wife
trap
hong
with
afri
mill
wish
bloc
wins
omni
stro
lose
debt
rule
nick
art-
fans
floo
sexo
sana
sino
sent
bird
mary
chen
zone
futu
beat
beer
cook
turb
todo
vill
forc
whos
flip
publ
onto
poet
petr
proj
secu
frie
prem
fear
gett
webc
caro
prov
perf
beth
poly
whee
dani
wang
fits
hype
ecos
grap
wwwa
seen
prod
were
tria
allt
geta
labe
fant
lake
leve
whol
webt
allo
chao
voic
soci
oyun
acce
kiss
mans
alla
plac
stee
stev
cute
immo
dong
silv
newt
cast
anna
tony
jeff
adds
neve
mira
temp
skil
netw
jame
fors
summ
bull
lott
emai
airs
wwws
scor
maca
stag
sexp
dent
getm
holy
fait
lots
buya
evil
wwwc
ping
doll
once
zoom
japa
zhao
quot
alli
doct
chee
cats
mist
wait
alte
neck
shoo
penn
mama
spea
heav
maps
clou
bust
gone
sant
mall
ebay
cond
gate
sort
taob
mani
laur
insu
plot
runs
webb
pays
cari
roma
tric
alar
clar
poor
drag
nude
scot
pro-
linu
pimp
thee
wwwm
sunn
atla
wher
buym
top-
giga
shad
wave
goth
pars
mens
larg
exit
sama
cher
coun
shen
pari
fram
icon
bloo
boom
reac
admi
merc
spec
chec
clue
diva
scen
moda
cant
ocea
boys
math
baid
comi
scra
cali
mant
iris
mere
nutr
iron
conc
josh
trut
dive
ryan
newb
newc
boss
devi
bart
proc
fami
sain
phar
netb
yous
cour
ecom
tend
toot
when
andy
lang
vist
drug
quan
canc
rout
pant
eboo
soni
prog
citi
mich
inne
weba
such
poem
coac
alan
jazz
bara
busi
tara
brid
lind
xing
hydr
earl
netc
spin
bigt
veri
asks
oran
epic
tekn
danc
wolf
shou
guar
hotc
palm
ling
stea
jenn
dial
vent
kell
aims
vice
sexc
ipod
juli
roun
boob
wing
topt
hung
funk
cham
stri
neta
away
fate
memo
butt
pair
bing
insi
cloc
fron
hone
kino
ment
chem
pizz
pand
lisa
kris
carm
nove
nord
revi
navi
econ
sunt
sund
sexi
howt
crow
dish
inco
jane
webm
spam
sang
meri
role
cata
sham
duba
indy
grac
entr
mund
boar
acts
batt
wome
getb
holl
dati
staf
ming
burn
retr
beac
sexe
barr
buck
sexb
topl
tama
webe
brok
mala
bria
funs
seed
babe
topc
erot
mona
albu
shee
wwwb
zhan
arta
pent
xian
newm
rela
toys
buyt
nota
allm
tecn
netp
conn
visa
cath
sher
tige
salo
dick
thek
fang
corp
webi
inch
majo
topi
bada
hotp
lase
ches
bigc
adve
reco
mond
quee
coup
dyna
gots
agel
lion
clos
says
till
papa
mali
allb
also
chas
bali
chef
webp
ruby
shif
mapl
myco
sina
tone
new-
reno
jian
sala
wast
mort
allp
maya
zona
stuf
choc
mach
quie
nows
sean
artb
marr
eats
newp
tent
ways
vivi
brit
bigp
bigf
kang
youp
aska
peri
peru
cris
arto
pens
myma
vina
buyc
eden
ohio
fash
redt
silk
kidd
puss
allc
airt
conf
webd
halo
car-
greg
infi
joys
wwwt
deat
ford
pops
trib
numb
gala
anne
rede
topp
myca
hotb
atom
thir
sail
litt
avto
cras
grad
mint
peti
mein
newl
corn
pron
jaso
eagl
brin
wifi
fiel
scal
hot-
sele
habe
bore
bass
heli
uber
graf
milk
bobb
blan
cctv
jean
gulf
pola
tras
getc
all-
swis
huan
logs
airb
brad
mono
both
afro
tick
ecot
envi
jing
omeg
lian
vali
sexf
newa
anys
rick
sexa
chro
agro
jade
boxs
writ
netl
netf
bigd
cans
felt
alta
maro
diam
mate
goal
aman
bare
carg
jobb
allf
coff
funt
jess
rune
gods
circ
myst
sets
shir
milf
vans
coin
newd
comb
shes
nast
siam
onet
mars
nois
bala
airf
carc
pear
spri
foro
outs
tall
domi
ding
ties
iraq
gian
sesl
nerd
juic
karm
wwwp
fuel
doub
chun
spli
rang
expe
supp
buye
buyb
guns
refe
mads
topb
tami
phat
angl
mayb
bigl
big-
syst
youa
mysp
woma
fars
regi
magn
guys
seat
kool
none
wwwl
2008
tong
goin
toon
1800
tera
buyp
tens
barn
getf
foun
myne
male
equa
hotm
legs
miam
said
luna
spir
netd
capi
kate
wars
dies
bein
artf
grin
bind
bead
fred
gard
liqu
esco
sexm
olds
seet
cust
ends
remo
horn
carf
youb
muzi
dana
flyt
bone
deai
kevi
sour
gott
arth
pill
diab
sams
gear
yell
amor
momo
amaz
redb
inet
glen
getp
leas
xtre
esta
bomb
bush
melo
brig
plat
funf
lunc
bras
payp
kare
ecol
flys
loss
oliv
artc
twit
jewe
doga
sill
lime
sunb
seri
duty
sole
prob
nico
tatt
echo
deni
labo
mada
spic
wint
topr
reso
asse
masa
yang
simo
priv
netr
jose
divi
grey
geni
ecoc
ecop
soso
bond
bitc
outl
semi
been
alas
troo
glam
pret
ices
educ
thev
topa
knew
ting
mash
mora
scar
plas
fuji
angr
guil
cape
crys
beds
bigm
ambe
genc
luxu
mara
ages
aliv
neur
rank
popt
aust
scre
scri
redc
wond
punk
jobt
norm
jesu
neth
iwan
pint
bobo
elle
soma
cuts
pupp
sunr
putt
popp
accu
nobl
tenn
sexh
sexl
traf
dela
unio
gues
dizi
bigg
focu
yama
youl
youf
vers
lyri
kari
reel
ourt
cock
harr
alia
lamp
nike
buyf
amar
baro
rave
reda
get-
year
vert
manu
resi
clai
clay
whic
myli
hotf
plai
funa
funb
pian
inth
chap
vand
airl
awar
hans
lawn
swin
hire
sake
ecob
wwwf
wwwd
tale
artr
thej
dogg
alis
toky
yard
lapt
toma
ross
biga
redf
exce
trea
busc
topm
myho
soca
pics
biot
noki
airp
bebe
shap
ster
lond
mywe
gray
maga
lawy
salt
gros
artt
sung
mavi
alie
reve
weal
newh
newf
span
gara
boun
gang
redh
geor
onep
getd
lord
spel
arca
bana
rema
conv
lite
youh
catt
doug
agri
dean
perm
gotb
lava
harm
matr
nail
sun-
kyle
newg
anyt
plum
webo
toyo
apex
dine
redp
redi
cake
lego
bios
gays
lili
visu
avan
fixe
web2
payt
bigr
habi
choo
dand
www-
craf
kick
psyc
earn
cano
dash
sunp
alic
smok
qual
sunc
bost
nake
bath
ican
dans
empt
prep
redm
red-
itst
cope
kont
torr
bitt
meds
guan
hotd
hotl
func
shut
dest
spen
netm
lily
born
mods
nati
arma
warp
bonu
went
gota
gotr
repo
jobf
dona
frog
bail
russ
newe
neww
sono
posh
rene
teng
trin
hotw
utah
exac
getr
delt
kidz
uniq
sigh
webh
bads
mann
yaho
puls
invi
bio-
habb
toph
aire
brav
yoyo
quad
lowe
taxs
choi
youm
titl
theu
grav
hats
askb
feng
artl
grid
sinc
vip-
insp
offs
vall
clin
kent
soho
peng
budd
redr
itsa
eyep
getl
allw
hota
funp
pali
baba
pier
meth
mydo
cone
teas
mysa
kart
quiz
vidi
elli
fell
tuto
pile
hows
limi
oils
vend
smit
cora
corr
ands
ferr
buyl
hint
gary
amat
livi
guru
cube
hobb
squa
topo
topf
resu
sush
mino
moti
bene
twos
wow-
dese
myre
kala
socc
payl
bigh
adva
neon
cave
kath
gent
flyb
wwwh
uses
askm
noti
groo
leds
seas
othe
sunm
vale
actu
fanf
limo
vira
muse
peop
popo
newr
hers
whys
onec
ghos
mood
gall
bibl
refi
sick
kenn
cult
topw
calc
mano
pict
hotr
alsa
biom
lama
uppe
drum
mura
does
tina
kiwi
guit
remi
webg
chir
moni
youd
nurs
vias
verd
else
rebe
joyc
nowa
pert
pock
gotc
betr
pump
artd
cabl
outd
sine
joyf
polo
flam
weir
qing
mote
roug
oneb
geti
patc
topd
novo
fake
mimi
jobp
ante
nors
pica
bric
zhen
hoth
mybe
chad
mena
fitn
idol
mojo
dogt
sky-
paya
advi
scoo
gass
macr
ques
dama
vida
youc
taka
perl
pino
artm
fsbo
vino
vant
matu
sist
rail
dayt
sync
kali
bidt
coop
ande
buyw
buy-
caus
barc
mypa
esca
nich
jimm
sexd
clim
camb
rese
toto
hash
inti
mete
hawa
sixt
jueg
payb
cole
youg
dang
shab
aria
flyf
grab
sati
hood
repl
joba
sunl
almo
towe
mapf
fana
fanb
keyf
redd
abcd
uplo
kitt
flig
derm
glor
seks
bidb
hama
momm
reli
trop
inca
barg
solv
germ
aaro
madd
univ
baza
robe
anta
jobl
beli
meng
beni
lala
tint
thro
nana
gast
mysh
dann
wwwn
yuan
skat
dema
demi
wwwg
maki
aspe
outf
sunf
dont
fand
kita
mesa
seco
glow
popc
kama
mapt
rena
sohb
com-
bida
mirc
loli
pira
thum
slut
holo
hdtv
kaya
cami
webr
punt
manc
pana
nomo
belo
pala
seni
twop
emma
askt
sons
nete
airc
air-
capt
chie
wwwe
cate
you-
myse
abra
warr
seeb
shal
cann
berr
ourc
mixe
velo
revo
athe
fren
rely
bubb
supr
buyi
buyr
sona
sony
lola
font
barf
dvd-
duck
jung
deba
avia
8888
jobm
sans
alld
cert
fitb
nowt
mays
paym
kana
myfa
loos
flyl
warn
roof
deca
peta
rome
kaza
purp
gotf
gotp
crim
domo
rady
artp
sunh
broo
engl
keyp
lara
cere
itsm
kite
guia
mesh
popb
upon
acco
secr
buyd
pare
sche
myto
usa-
dall
oner
apps
madm
holi
keyt
maha
tamp
hybr
nots
trys
mane
bron
gong
pico
alll
tigh
fung
chal
mets
saud
tied
crui
gofi
neti
xbox
webf
trul
bean
carn
monk
ares
hipp
flyc
dubl
sats
aids
chur
cang
bold
boll
forb
outb
fora
nong
dogm
mymo
qian
inse
pati
toos
dove
tire
moms
quin
owns
barm
luxe
dual
madi
dora
swan
winn
elma
oldb
cams
novi
cale
mela
alwa
badb
aven
thom
bent
fitt
fixs
dogp
webu
payd
hele
eros
teat
shak
gami
suda
flya
owne
ward
luci
askp
adda
ourb
mapa
frag
insa
sofi
tomm
yong
bidf
kink
trun
pole
took
meda
sexr
getw
wina
gadg
bizi
oper
bitb
manf
manh
carw
blas
civi
myla
buyh
allh
allu
menu
loco
aira
dogc
sixs
emil
carh
heng
taxt
ninj
crai
fare
flyp
arro
stac
deck
digg
askl
lows
pere
pine
gotm
repa
joan
outp
airm
vips
lamb
yeni
alka
oasi
anyw
anyp
goes
kami
bari
anni
sexg
redl
one-
onef
onem
onew
kidb
kidc
drys
olde
raku
frui
fifa
ambi
jobw
assi
eigh
alsh
myba
funm
inta
boxe
twot
adop
runt
beda
ebon
swim
susa
swif
youw
crac
fara
flym
rega
indu
sheb
shea
shem
mina
eyef
burs
shit
eyeb
myme
keyc
suma
leon
popa
rust
lett
patr
lumi
floa
depo
stam
noma
oned
myth
madp
alam
cama
topg
unis
bizz
kral
bitf
espa
jobc
mons
nora
nori
allg
sims
orbi
impa
lanc
lemo
fitc
fitf
aren
dist
haha
netk
myde
neva
peer
payf
forl
orth
myso
wwwi
tita
wick
orio
fata
erin
aske
shim
mila
gotl
gotw
artw
outc
mapp
hart
mymi
esse
reta
bilg
xtra
keyl
sumi
sage
bamb
flir
beck
musc
isho
blis
bake
weed
xiny
azur
barp
icep
lean
madf
madc
madr
kidt
1001
legi
este
unic
woor
manp
manm
jobh
dawn
mylo
ahme
allr
impr
twee
pris
boxb
mend
gay-
lila
endo
juan
phas
chez
banc
braz
bigw
kand
vict
forw
aura
hent
vial
catl
gasb
gasp
dino
ebiz
shah
arom
dogb
cala
wwww
wend
cani
canp
lawt
mais
ourd
ourp
yess
bobs
shot
oute
acro
noni
dogf
keyb
safa
icel
libe
seo-
taco
viag
aler
syne
expl
mixt
popf
anyb
trim
proo
begi
sere
surg
barl
mede
prec
sexw
cuba
onel
madt
lotu
pasa
joel
hotg
mybi
fame
bela
funw
saba
lack
airw
twol
cong
chik
nina
lesb
neko
cran
fart
inge
ourm
sari
swap
mypo
oman
crew
lucy
hori
canl
forf
addi
alti
ourf
eyet
eyec
bete
betl
twis
outw
onth
pend
whoi
toke
emla
dura
oneg
dayb
fusi
mixs
popl
1234
jord
tena
wool
rosa
solu
whom
acid
sexu
soph
ampl
sera
sema
biza
badp
lawf
lawc
robi
tryt
rfid
assa
egyp
all4
funl
seel
pale
boxp
poun
catb
crus
seep
izle
aird
runl
coli
baja
lowc
chor
raws
anon
feds
stel
thri
kata
celi
ecof
ecor
gest
grat
joyb
hitc
hite
rota
shed
dres
warc
mp3-
hoop
koko
askf
chua
nowp
canb
maka
savi
burg
kwik
proa
betf
maru
sami
filt
mapb
vine
keyw
isra
lame
libr
tenf
virg
mygo
popu
dada
anyl
relo
bidr
bidl
pixi
judg
hisp
hiss
penc
laca
gunb
pray
ince
bark
wein
dale
icet
mytr
obey
squi
winc
vari
doin
buss
herm
bdsm
affi
tora
badl
dart
brie
aver
taxp
2012
dock
ying
brew
syri
maui
dogw
runp
fest
reme
bigo
erro
scou
taxc
yach
khan
reci
dram
aris
hitt
armo
cres
nowl
perc
betp
betc
beto
grip
sass
sunw
penf
samp
vanc
nola
kola
feti
itsf
venu
foxy
sett
tomo
joom
bati
bidp
bidc
tris
andi
anda
buff
dali
icec
thor
geth
myte
myta
oldc
hond
saas
meli
webw
sock
bitl
nott
lawp
tryb
norw
norc
oldp
elev
alna
bioc
bion
bend
fitl
youk
soap
endl
sada
mera
samm
amus
neto
bans
fixt
airr
twoc
alum
cola
lowb
saka
youx
saga
farf
hita
quik
dais
dost
nata
nava
seef
askd
nowc
cane
addf
lone
wake
samb
pilo
outt
vanb
vani
goma
boya
mout
popw
ista
zing
tail
dayc
expa
anyf
rele
inmo
bidw
oneh
hist
como
pena
moor
jeep
dina
brun
homo
iceb
faux
onea
alon
madh
boon
hert
saha
oldm
apri
tope
bizt
lees
spok
spai
bita
lawa
slot
tryc
manl
resc
ourl
ally
rais
goli
sima
impe
tari
boxt
bens
hawk
lill
balt
seos
netg
bann
skyp
webk
payc
chim
lies
auro
neop
hiro
skip
nanc
gasf
todd
rawb
shaw
kati
nara
flyi
gras
scha
kaix
hata
sata
seec
eyel
pet-
neat
subs
saku
pina
yesi
yest
lexi
gril
shre
gore
suna
sms-
sile
fanl
fanp
vang
snoo
vibe
newy
seth
toms
pate
rans
atti
scam
kine
deve
uncl
surv
wooo
wayt
penp
gunt
saya
soup
redw
itsh
gete
knoc
appr
lori
ates
kidl
vary
lege
tres
spoo
kuai
bitp
dome
manb
blad
defe
meal
adap
mehr
xxx-
bris
tobe
elem
fitm
meto
fitp
alha
lust
myfi
dust
netv
tana
loga
runf
goda
capr
slic
whyl
bizs
berg
forp
pran
ciga
youy
vera
supa
joyp
joyt
hitm
arms
agre
nowf
perr
gotd
ajax
trya
artn
anth
outr
trio
asta
nono
dayf
donn
jake
keyr
keym
oilb
wowg
lapa
sofa
jaco
mygr
al-m
apar
alem
kaka
acad
cort
bidd
tril
kame
judy
mapr
turi
cass
buyn
beyo
budg
lace
schu
bard
laba
getg
atel
oldt
oldf
trek
apro
hera
slam
pest
sate
lawb
jets
3000
nore
sani
dood
html
famo
migh
rach
meno
phoe
issu
shun
tier
aval
tudo
sits
tant
emul
fixf
twob
mayf
chia
gash
carv
forg
catf
viap
viat
lipo
infr
tabi
mage
hitp
mech
schi
mypr
wart
meat
bona
hook
acqu
petc
askc
nowb
agor
sava
noto
addp
addb
maid
betw
enco
manw
oral
mapc
coas
sect
dogl
sobe
curr
lens
joey
luis
skys
alin
alib
itse
stun
seou
seta
flic
baob
gato
daya
toob
mixi
popm
anya
nikk
vive
ulti
tenm
troy
pane
avon
buyg
deto
laco
kira
hoto
sals
whor
prol
bruc
redo
icef
leav
yeah
herp
kidp
wand
winh
acme
estr
teal
teab
woai
emer
notr
bade
badc
jobd
asso
gosh
cach
siri
taxf
sint
lasi
biof
biop
drun
rama
zhou
oldn
skyb
twof
runm
baha
evol
argo
yume
adri
macs
viac
wwwk
mexi
hiph
navy
geno
ecod
flye
tala
shep
agil
erik
arra
meme
acne
askw
chuc
canf
kash
aspi
proe
addt
betb
seal
cabi
vila
soda
samo
rant
dons
finn
luke
vanp
vana
taxb
smoo
gaze
deri
patt
dodo
kuwa
mixm
newn
coro
ghan
bide
sixb
peda
mapw
nama
alab
klik
alpi
dutc
ozon
sali
pont
cade
sayt
igot
cozy
sexk
sexv
itss
myvi
mafi
fuzz
geoc
dena
madl
wink
winb
rong
haya
sayf
adel
timb
teap
seew
mell
webn
tile
notl
badf
jobg
blaz
broa
kabu
ohmy
tote
myna
codi
birt
slin
ichi
bren
avat
ador
chel
twom
runb
rund
teet
caps
kuro
ethi
zeni
catp
kims
biz-
rawf
evan
celt
modu
succ
usam
wima
hath
warw
warb
sato
bong
esho
blur
pett
canh
sili
ella
lawr
addr
myin
pera
perp
cutt
ledp
ledb
job-
valo
aste
e-co
actp
acte
hari
fanm
vanh
smut
boyp
mixf
oilp
alim
lami
mata
beam
wini
mask
kais
myga
meso
dayd
haos
maxx
max-
anyc
amon
alga
node
kamp
levi
telc
kend
tenb
tenp
noth
coma
blon
toxi
gali
dvds
brus
nash
itso
lore
kidf
winf
wwwr
dore
kong
abus
badg
badm
robb
anto
resa
rage
scub
gosp
gost
mori
alln
mybu
biol
boxi
nexu
fita
fitw
bump
enet
phan
zipp
inpu
spid
netn
airg
airh
dogd
dogr
toni
kale
capa
anew
jone
whyb
chit
noco
hala
kiki
amba
henr
asap
chou
catm
hypn
viaj
wwwu
moba
kora
ecoa
farl
modl
suck
joyl
joym
hitb
hitf
usab
chis
tips
jami
satc
mp3s
cree
uset
bitr
tins
aski
lowp
adde
addl
shoc
jobr
nose
outo
marg
sune
acta
engi
boyc
2000
tram
outa
moun
oilf
tinh
bina
itsp
stuc
newi
viru
fabr
winw
al-a
amin
amig
frea
dayp
poph
mayc
cord
mapm
bidm
wayb
comf
buyo
scru
disp
gunc
myra
flav
viab
exec
amal
cota
empl
bois
appa
mado
boos
dele
scie
kidw
kron
bizb
boca
sibe
sawa
toro
satt
didi
resp
zion
loud
viol
mybo
vipc
biog
taro
usta
priz
babi
bena
bree
sita
merr
nate
the1
myda
bant
logt
fixm
fixi
dog-
ceda
peni
bign
sayb
arge
befo
prat
taxa
spra
buga
mysi
iber
viaf
paci
infe
farb
flyh
flyd
flyw
tuna
inda
gram
hitl
warl
warf
tien
wwwo
yesp
bonn
rice
spaf
oreg
mili
addc
asma
oura
ledf
artg
thex
mare
ansa
eate
vanf
bili
taxh
larr
mych
croc
midi
myad
reha
dere
elco
popd
niko
acai
atta
rast
kens
tenc
panc
vest
bowl
ikla
xinh
moos
disn
geto
usef
dane
fone
whot
moss
sexn
sexx
elis
avoi
ashe
madw
kida
kidh
menb
loto
spit
plea
bitm
citr
notf
badt
elan
wayl
sums
sach
kaba
masc
plaz
golo
taxl
funr
edus
funh
intu
boxc
mene
fith
docs
foss
abby
rein
sade
desc
fema
huay
puta
zhon
twow
brac
kept
bedb
bedr
divo
hali
kera
wedd
neoc
catr
catw
satu
deko
vero
anan
held
mish
rawp
asco
karl
mack
stem
fibe
hock
modt
tuni
tori
welo
shef
usag
ergo
yesh
minu
seeh
eyem
shik
addm
beij
betm
the2
grim
caba
almu
acto
samu
iden
usin
mamm
mosh
vint
reti
vane
vanl
itch
iced
boyf
boyt
sind
wowc
setp
hann
hain
offl
nite
dayl
ranc
expr
maxs
popr
mayp
usel
bidh
maph
gaps
zomb
turf
fris
wayn
wayf
coms
clot
gund
smas
pony
ecli
ugur
aimc
gemi
tetr
prev
icem
sorr
pika
voca
exam
tere
june
kenk
uni-
hurr
oldl
camo
dott
dota
top1
unif
wrap
lare
lovi
winp
badr
tiki
tryf
manr
resh
noon
noor
mick
norb
synt
taxm
funi
spas
impo
sene
ibiz
metl
catd
endt
sadi
arco
airo
skyl
awak
godo
mold
cups
rece
taxd
phys
twor
anar
spre
adre
recr
jour
maco
wwwj
gama
pero
crap
ecoh
modp
modb
modm
rebo
hitr
hito
ownt
deme
whob
anit
aide
tani
nowr
feli
runn
altr
cabo
trou
dogh
cupi
fini
meis
fanh
fanw
aloh
enge
howw
howl
wage
volt
2009
mous
cera
oilc
oilt
wows
iste
lele
apol
sash
rall
modi
flin
pata
cibe
mixb
newo
cork
andl
andf
algo
stin
alpa
whyw
dorm
tri-
aces
sont
smac
pond
cadd
atra
moch
tata
pyro
gand
icer
sora
itsc
elif
yule
wils
thun
refl
degr
sayl
wass
copa
busp
menf
sixf
tims
teaf
sigm
mele
webz
bitw
notm
mong
e-bo
luca
norf
iams
sany
morg
xxxt
upto
pang
moth
ivan
targ
paws
fitr
jiao
metb
cati
vaca
faci
ende
desp
merk
sogo
suba
tole
logf
logl
fixb
fixa
alde
twod
aran
peek
payw
trum
whyt
jona
gigi
kurd
kura
subm
rude
ecar
lawl
ditt
jays
cat-
anas
gasm
gasc
bugp
bugb
mysu
bott
acha
arie
jumb
ecog
naru
farr
flyr
mywa
tali
ourw
shew
shet
usas
stal
ajan
wara
yesf
isee
anis
eyew
pano
my-c
nowo
canw
gyps
bert
fori
ethe
zent
weig
perv
ourg
leda
gote
sea-
jobo
orac
actf
lavi
eata
lata
hara
essa
meil
vanm
boyl
elde
tibi
boda
mati
setb
wack
glit
hatb
bund
nitr
dayw
haoy
toom
mixp
bait
isha
ishi
naka
anym
hund
cors
pipi
123s
peep
ando
andb
axis
mapo
coal
casc
letr
deta
ilan
oddf
lach
hemp
cinc
bour
emre
gale
tati
purs
gina
sori
rape
flee
geom
denv
piec
asha
zebr
unim
dryb
winm
tyle
whis
busb
sixl
dryf
bizf
bizn
jang
joll
tayl
sous
dida
whow
emed
notw
jobe
blak
dara
assu
iama
sanj
elca
hast
hass
gola
mybl
bele
bioa
lane
boxl
menh
meni
2010
bumb
aret
awes
abbe
enda
myfo
bald
haus
biki
logm
twoj
runa
runc
fuse
slid
mayo
payo
whyp
moll
dach
humo
divx
kurt
quar
slav
rubi
taxw
albe
bask
rawl
shai
dima
exer
infa
tits
oste
auti
illi
obje
iptv
aqui
ourr
fati
whyn
gaia
whof
itis
jama
loui
seer
noww
canr
zenc
cuti
ledl
gotg
auct
yesc
yesb
yesw
marm
deem
fili
eatb
fisc
aget
zeta
keyh
howi
twoh
curt
kirk
joes
lima
reva
wowo
idah
kitc
e-mo
wino
ills
maso
izmi
bung
iowa
raid
toop
mixl
anyh
anyd
sixp
bido
andt
andw
eatp
pani
nest
gunf
gunp
cind
scop
wode
wego
deja
aimf
gill
salv
bar-
lowl
abov
gems
isma
usac
icea
fros
itsw
york
geo-
viny
amen
inci
asho
tema
booz
elvi
delu
heri
unib
unix
winl
doit
prid
effe
menp
herc
dots
slas
doth
topu
easi
bizd
aden
myha
leet
pion
exot
cobr
notb
lawm
lawe
badd
tryp
antr
defi
nave
jell
sanc
hase
doom
bogo
fun-
owen
iweb
simi
boxf
atoz
rack
boxm
jiaj
aplu
abc-
gaym
reis
endf
rabb
snea
vogu
shui
fixl
pott
airi
lulu
skyc
jaya
mayl
otel
payr
whya
kans
yumm
shis
viam
xeno
cato
poop
nann
bugt
tuls
miso
gela
boto
bota
dami
dame
iman
stei
0769
guyf
jill
sena
waps
ownb
anka
fatt
hust
sheh
napa
hatc
tela
arml
deci
petp
nowm
ihat
bere
milo
addw
puri
omer
ledc
1717
tryl
gotn
eyeo
deer
diar
loft
actr
agua
eatm
goos
haru
tedd
dono
1314
rete
vanw
vann
howd
taxe
alit
usho
inso
ozel
toko
fabu
setf
zens
hail
e-me
mazi
al-s
amic
lain
frei
daym
tooc
maxm
mixa
maym
gave
bata
komp
devo
mome
rend
pcma
raja
tene
nobe
hish
comc
whyh
goco
whyc
auss
adsl
alco
alca
guna
0755
gunr
aimp
e-sa
e-se
akti
orla
iceh
heid
itsl
mebe
cema
deng
naut
getn
mito
ashi
kawa
madb
keno
dryl
wana
wann
wedo
oldr
oldw
nuev
busl
busf
clev
debi
herw
heps
jana
calo
leed
jers
tear
teaw
saws
kons
roba
qata
kolo
wong
favo
hasa
goss
boxa
bioe
spat
chac
boxh
boxd
benn
ustr
metf
myfr
nada
sadh
wowb
mert
olda
told
logr
logp
angi
moja
bebo
mayh
bord
isis
miro
bede
kuru
nepa
anch
keit
forr
sese
zang
taxr
ladi
neo-
youo
spro
kimb
gasa
6666
mac-
isol
xang
swed
octo
octa
fari
modf
flyg
inee
joyw
joyo
joyd
orie
ownf
roto
miha
whoh
calm
armc
warh
aida
usem
usep
bolo
bury
jena
forh
shib
ourh
yesl
crit
eyeg
lick
baga
sumo
cabe
fals
penl
nach
roge
aged
aloe
ruth
keya
howc
boyb
curi
lamo
wowa
stub
seop
hava
vort
flix
beco
hao1
maxa
newk
mayt
hamb
hamm
snip
123t
nami
copi
tenl
goca
trit
buyu
hema
kuku
http
useb
usea
aime
pedi
ingr
chlo
abor
miao
empi
annu
med-
jims
fold
ashl
kosh
edil
geop
geos
geol
eter
broc
mika
stab
appe
juni
dori
kidm
dryc
dryp
taok
esto
busa
buse
beso
herl
amya
soto
lays
asla
jerr
satp
spon
soco
myno
wayo
trym
jobn
twic
onda
dari
clam
gose
myjo
bian
malt
xxxs
hoti
fiji
mian
belt
riva
360s
chab
lant
lans
coda
benz
rode
fite
tiep
stol
mett
tieb
lotl
reik
endi
sitt
sado
sads
stup
onur
sper
chep
netz
logb
dogu
myro
miya
capo
aram
bedf
bura
bolt
teks
neob
viar
hima
vipt
bugf
bugg
fool
reca
inho
rawt
rawm
vian
fede
coug
drac
olym
gens
farw
fly-
guyp
tenh
joyh
orig
quil
meca
schm
natt
ebuy
paga
shec
usan
usap
jiny
stad
hatt
inbo
usen
rewa
hidd
askr
koto
nowh
nowd
canm
savo
shiz
shiv
medc
dien
itec
alto
perd
dude
guym
guyc
ledt
cric
eyea
eyed
shok
seac
bags
theq
fila
answ
suny
asto
trie
rura
mape
actl
eatw
tutt
triv
ageb
vids
tasa
howh
snac
volu
flux
amwa
lena
vipe
oilm
futb
wowt
mats
lapi
fuku
myar
myal
gazo
agap
hait
illu
hatf
pats
zach
offt
musl
amis
usec
hane
tood
pope
gogr
clie
kele
cori
opus
atte
porc
riot
onei
widg
penw
bidi
tibe
clon
rosi
hind
oddl
kuma
ownc
tidy
refo
xuan
hila
viad
amst
aiml
cote
barh
jare
myti
rhin
anno
onco
redu
redn
gera
itsd
walt
0731
abst
ahea
abso
boog
delo
herd
herr
knit
enta
koch
camc
null
hono
bizc
acor
saym
satr
ilik
e-de
webv
kraz
biob
whoo
bado
salu
badh
wayp
wayw
wesh
otto
vamp
clau
zest
noro
medr
moro
hotv
emar
nopa
rite
funo
piao
eure
biod
raci
fitd
doco
metc
bret
niss
acer
otak
desa
chev
tans
wors
gofo
mydr
bani
aunt
runh
brat
cona
hani
gode
emin
bedo
1st-
bige
noca
hena
bia2
lade
chop
arel
nani
cowb
eazy
balk
misa
rawc
geld
dime
cava
coul
gamb
itma
watt
katy
ariz
ridg
farg
farc
farh
rays
modw
magg
hith
ownp
sheg
stai
baya
arme
telo
fift
wwwv
zapa
askg
asko
askh
bole
forn
0086
cisc
pins
cutb
ledm
yeso
in-s
artv
safi
nost
nosh
shri
gori
filo
suni
alme
mull
e-ca
komi
actc
eatl
fist
agef
mosa
orch
vinc
keyd
howf
bach
outg
oill
woww
itsb
bama
sacr
lape
tide
upse
pulp
reba
agai
setc
setm
seto
e-ma
myge
bunn
mest
amir
fade
usma
sevg
sevi
arbo
maxp
baix
popi
goge
newj
dadd
anyr
anyg
huna
inma
lowf
weca
yuki
deva
mapi
gapt
serg
eath
sura
suri
frid
djmi
nobu
atto
brya
harl
oddt
oddb
disa
boul
prac
dank
barw
atri
whod
fath
lowr
pren
pref
escr
vick
jimb
geog
geof
nuke
cair
nige
herf
knig
prio
menl
bese
herh
blen
pitt
dotc
omar
unip
calv
duel
teah
eski
tore
dids
tryg
tinc
yogi
osca
peli
pans
vect
atma
vela
sela
gole
arya
kilo
lasa
edu-
cody
thic
docu
shua
padd
gayb
gayp
haka
goho
acet
jojo
alch
rekl
wort
fixp
runw
twoa
kall
payh
whyf
mole
bedm
bedl
jeux
unse
kema
lien
kane
ethn
slap
swit
drma
chon
chos
prou
hims
pork
basa
misc
damn
loon
pace
pach
reed
draf
itpa
sudo
fard
faro
fark
farp
sost
maza
itra
bibi
hitd
rico
ownl
schw
onde
fats
olim
usad
whop
jinh
abri
mypi
armi
satf
anil
cums
zapp
minh
seea
mins
askn
askj
nowe
canv
taki
veda
etha
bern
beri
pror
proh
addh
souk
pinc
guyh
cutc
ouro
thus
goti
gotu
yesd
espo
boba
puma
reef
shon
dice
muni
365s
artk
myow
net2
pili
dolc
outm
marl
goro
falc
valv
spla
raff
mapg
actw
ging
eatt
eati
penh
zune
jogo
2020
agep
atlo
howr
noli
howb
taxo
cogi
lari
gomo
gome
bist
boyh
caff
sins
sink
oilg
oilw
rabi
seob
midw
myas
myan
tura
agar
setl
norp
offr
mese
e-ka
maxe
gloo
beef
mayd
mayi
anyo
acar
lago
hams
trig
klip
tenr
comu
leba
whyd
zhua
furn
isle
letm
leto
harb
adsp
wata
oddp
init
sond
gune
prag
hotn
dant
danb
sayn
rogu
atre
atro
pred
dala
redg
nemo
kost
itsi
trak
clix
wuha
mcaf
mits
atea
kidr
itha
mcca
taot
oldi
idle
bles
pitc
shji
topn
kidi
myhe
layb
papi
jere
skul
0898
kona
tort
avis
seda
notc
badw
robs
parc
mank
blab
darr
kole
myle
sank
pash
syna
caca
malo
scat
invo
wuxi
rona
kadi
adco
taxg
fune
sira
intl
hexa
chav
xinl
boxr
sabr
sabi
fit-
edwa
meti
klas
lile
endb
rams
goha
5sta
moha
alja
chew
tand
yeme
sonl
sonm
mydi
pega
banl
banh
banj
logh
fixd
fixc
alda
runr
hebe
noel
dixi
pubs
arac
boro
posi
miri
doyo
tess
bedt
colt
kank
gasl
cary
goba
fork
chok
youi
satw
gasg
bugc
rawh
karo
the4
azte
kint
aima
rope
fedc
kobi
ecov
raya
modr
modc
lide
tabo
rege
rash
grai
joyr
joya
usat
biyo
bayo
jams
omah
armf
sath
kiev
buta
eyer
liza
laze
nowi
cany
kast
swag
boli
buro
lieb
diem
proy
thie
cutl
yesm
yesa
rowa
tout
mard
loja
pepp
emba
outh
symb
onca
weth
pyra
suno
fala
vala
mapq
eato
rana
penb
goon
parr
gobi
mosc
wowm
fann
fan-
vanr
bile
bild
nole
howa
snak
seme
cogn
gomi
ibuy
seki
kani
toki
mida
naga
setr
alkh
abac
ozgu
illa
risi
hidr
offm
diyi
my-s
toor
mixc
vlog
newv
beet
blit
bliz
amou
koma
adju
123a
123c
stim
onen
itre
copp
noba
comt
casu
buda
gunl
sall
goat
aimt
rugb
sayh
cott
inga
lowt
prow
soba
sexj
volk
sore
soro
0577
0571
tach
fles
clif
afla
cubi
thon
wons
sayp
aten
kidn
unir
dryw
wank
wans
sedu
oldh
estu
menw
simb
blea
unik
bize
tea-
juri
soch
bith
epro
lawd
tryw
tryh
espe
jobi
wese
parl
sumb
puzz
fryf
sane
hasb
susi
inva
kobe
kred
eman
leta
thou
eleg
unti
pals
linc
lina
lank
goji
pawn
e-ta
endp
iboo
adob
sith
myfl
balo
dush
bale
glue
witc
metm
hepa
stru
netj
netu
banb
logg
logc
ango
cici
bram
sixc
neos
emir
moli
tapa
oilh
hale
kant
tams
yasa
rech
aeri
puff
asah
orta
anak
sier
gasw
bugr
tumb
neil
bast
bash
raww
arka
maci
quer
danr
paco
sten
ekon
vibr
puro
modo
guyb
atas
onse
taba
pist
teni
hitw
schl
usaf
itin
whoa
nego
myus
dece
isel
boni
useh
agra
minc
alza
myke
petb
petm
memb
tinp
chub
doha
tryd
vedi
bola
anwa
lowm
zara
phim
zend
sega
perb
perk
ledh
yese
mema
beti
dola
gord
inov
tuan
alpe
goof
kach
dogo
dogi
donk
bema
agec
augu
reto
vano
enga
howe
alra
bisn
bish
boyd
ecoe
crop
vipi
frat
haku
toku
istr
yaya
beas
sydn
sete
qwik
atha
hose
hatp
derb
mygi
zink
beca
offc
offb
alen
haoh
enig
my-f
toof
tooh
maxc
glos
mixw
mixd
piss
voya
newz
myer
zoot
ferm
laga
sixd
hame
smel
123-
andm
andh
andp
judo
sohu
tron
vivo
hisa
hisc
penm
jons
clov
leth
letl
xind
budo
tenw
abit
ownm
gunn
gunm
gung
myri
thug
cina
elpa
lolo
tato
tate
pedr
aimh
danh
baki
fond
gupi
3721
kasi
empo
elre
bigi
nica
gana
foll
raps
itsg
flew
flea
cliq
rita
knox
rica
leap
sasa
sayc
dryt
whiz
sahi
oldg
maji
saun
lota
debr
topk
asli
trol
jerk
tead
sawt
webj
viki
tuli
punc
bitd
taks
badi
gour
jobk
bewa
defo
tinf
grub
noob
upda
mmor
mica
paso
pich
boga
nomi
hotj
toba
wron
elek
bior
lins
aswa
emon
babo
benc
fitg
muta
mutu
cage
metw
gayl
gaya
inre
lilb
abba
lush
haut
uspa
seon
skyt
dril
drif
godi
borg
bora
peel
opin
payi
payn
payg
paye
cryo
sote
bedc
bedp
buch
pave
rake
drog
nolo
gobe
ambr
shil
asan
nero
tima
verv
anac
duni
skib
kimo
kima
sito
igni
inha
rawa
rawd
setw
macf
hipe
i-co
paus
loot
fedf
fedm
usew
stir
ache
ipla
sapa
gend
fibr
ecow
ifre
pura
sosi
mywi
tabu
indr
yosh
gita
oure
ownw
meco
riff
dosi
hess
usar
stas
lowh
swar
sh-h
enti
tipp
eves
mypl
osha
mima
jamb
armp
waro
akin
satl
lous
loun
kota
savv
burd
subt
berl
tosh
tosa
died
swea
aspa
lixi
addo
guyl
cutf
cutp
ledr
lede
ledo
got-
seab
rada
rado
artu
embe
marv
sunk
acth
seca
penr
mymu
mami
agew
agea
gopa
finc
fing
sila
fano
keyi
howg
howm
baca
geri
tiff
bidg
mycl
mycr
curv
kire
leng
zama
popg
popk
cron
skye
ucuz
wowp
wowf
seoc
pifa
hifi
leco
ruma
bulb
atho
tomb
noho
gsm-
hyip
ders
lepa
blum
taiw
offp
mush
musa
mero
vitr
vito
diya
reju
dayh
dayg
haod
flop
pop-
anye
harv
abel
attr
furu
cyan
123g
123b
pixa
andc
weco
smsm
hull
sode
lowa
surp
soha
djma
1920
hisb
hisl
hism
comr
buyk
ween
xins
jiay
sare
hare
oddw
dise
disi
sonf
sonn
lise
gunh
incu
bout
cupp
cadi
sayw
shba
0595
whog
whoc
mype
dvdc
boke
lexu
rows
zeit
heim
wawa
hyun
geob
cain
denn
tobi
vapo
getv
arse
anun
podo
kide
spil
ofte
isim
elmo
wasa
ronc
old-
besi
onyx
dote
eweb
leec
vlad
hola
didd
kras
bito
notp
lawg
pima
wesa
blam
tinb
vald
repr
masi
nobo
razo
gosi
morp
allv
bima
rond
rons
opal
letu
eles
simm
i-re
vipb
dete
vipl
tart
lana
seno
sabe
boxg
boxw
benr
benb
fito
tiet
nadi
doca
tiel
shum
gayc
gayf
icha
lilo
vish
paki
lale
yasu
ramp
drup
siti
lesc
tinw
huax
puti
hvac
chet
horo
banp
banf
sack
vets
fixh
yank
airv
coni
hano
hany
godf
sixw
bors
saki
cryp
pay-
vata
bedi
khme
alfi
cosi
ecua
argu
caru
tame
quat
gobo
slac
lloy
waym
lawh
biao
liti
haba
chol
nuri
youe
catn
deka
verb
anat
kaku
nand
labi
toyc
khal
bugl
atst
foxs
e-re
macb
kome
queu
viaw
aaa-
naps
reho
gimm
shas
shat
shao
fedl
ruff
somo
lips
infu
e-pa
popn
crav
nari
auth
sos-
sota
hadi
kava
weli
schn
fat-
nath
bays
vash
haze
bons
umut
dige
myko
petf
nown
ask-
cank
oren
imin
berk
diep
dieb
diec
dief
laww
segu
xero
guyt
mait
ledw
ledi
yes-
eye-
butl
fenc
seah
gaga
arty
alve
myon
lavo
korn
marb
nons
mapd
actm
eatc
harp
noso
dond
pard
paro
agem
keyo
keyg
baco
iceg
twog
boym
curs
kiri
bodi
vipm
alig
frac
seog
saca
pote
nika
skoo
hatl
musk
mesi
dayo
my-t
toog
arbi
mrma
bees
niki
dads
fern
123d
proi
100p
alal
coat
frig
wayc
waya
trik
piso
garm
garr
amco
sonb
sonp
taga
humi
salm
flar
nume
nome
amas
dair
e-si
togo
preg
dill
icew
uran
0579
rapt
tact
elia
caif
clit
miki
suzu
doro
drym
sedi
winr
koro
legl
busw
tyre
sauc
blee
besp
debu
diff
spik
inks
jand
myhu
teli
24hr
layl
timo
gowa
pele
teag
vegg
vege
atak
atar
e-do
avid
torn
mrsh
kuan
biti
cita
weby
espi
espr
ants
suml
e-ba
toth
exis
mmos
witt
frys
kabo
mylu
ehom
laun
morr
hugh
dedi
sayo
koba
zeus
acur
letp
elen
sire
pall
palo
pal-
tuff
adma
boxo
menm
pour
roda
beng
chow
mova
metp
shur
tief
saig
viso
avas
lali
endm
sitf
mers
lacr
tolo
uspr
koda
sone
logd
fixw
niho
skyf
skyr
tona
hank
fush
sixe
cedi
pose
fizz
isit
whym
bedd
bedw
bigv
kurs
kerr
kern
lowd
loka
wayh
yuma
sess
aera
foam
neol
youv
verk
kimi
hoga
albi
bugm
helm
ero-
mysc
diri
mise
eddi
macp
mace
macc
dein
isot
anor
damo
elsa
shau
fedb
mobe
moby
athi
lipi
titi
titt
poka
nhac
queb
genk
crab
flyn
dyno
pisa
arou
cena
dosh
dosa
dose
hest
rott
mudd
jind
erie
mypu
asis
wari
egol
tiem
anic
wwwz
bitn
minn
stoo
afre
myka
tanz
mako
deaf
shij
dier
cnji
idio
jail
cuta
jock
ledg
arct
espn
eyen
lice
betg
dict
lawi
waka
lave
pepe
bage
gris
doli
dolo
outi
gora
ansi
qzon
dias
klei
24-7
e-ch
eatf
gool
ideo
idee
cupo
cupa
donc
elba
mame
daha
gopl
gopi
toow
orca
vind
pamp
nofa
semp
grum
boyi
boyr
sonc
mycu
cura
shev
cerc
nipp
frau
wowh
seom
isti
eres
kits
dobe
vene
gazi
fabi
abat
amtr
noha
tomi
tome
masr
hatw
sevd
my-e
bais
ishe
gosa
duke
erde
furr
trif
deno
onee
smso
devs
smsp
hula
amma
kush
sodi
lebe
vesp
leti
aceh
adsa
oddc
abil
heme
owna
asem
okay
numa
dini
aimw
amad
prok
medy
pago
moco
nasc
nasa
ice-
meid
tiec
geod
loha
boil
ceme
fies
one2
puts
hobo
zinc
alys
duan
etra
leat
mitc
mita
madg
juno
edul
podp
pivo
usba
kido
swat
sedo
0532
guve
whip
myju
knif
oxyg
rawr
legr
tash
trev
esti
evel
teco
tipt
doto
toca
topv
ucan
bizw
icom
myhi
mahi
sawb
datu
dato
satb
toug
weld
byth
elas
tryo
xmas
bros
sumt
itfi
koli
liga
sebe
assp
1111
suha
norl
iamb
sanm
elch
cact
axio
boge
xxxb
bril
seli
gabb
noah
kili
letf
apre
belg
funv
oila
impu
chau
e-ne
kafe
biro
sabo
lube
menc
locu
youj
kish
szji
shub
ofer
alhe
gayt
aweb
hops
lalu
endr
endc
endh
adon
sitc
crun
desm
lung
inpr
cunt
toli
ipro
fixr
airn
zirv
eban
skyd
mepa
cice
eren
viss
web3
web4
poss
sequ
burk
inli
inla
acom
kuri
colu
carj
pomo
tham
24x7
tekt
pras
monc
aure
iglo
pram
buen
ramb
wwwy
reas
enla
asal
kapa
arem
arew
hira
verm
loma
recy
ning
wii-
bugh
helo
sadl
labs
vood
misi
rawg
macl
pois
grew
geli
ace-
yogo
desh
cout
les-
infl
usst
asen
kats
kato
nhat
celu
geng
ekol
rove
onsi
sosp
sosh
regr
cime
lebo
isca
vido
nats
fatf
imed
rodo
husk
ades
arre
arri
swam
inpa
inbe
armt
armb
creo
bono
wwwx
usco
seeg
mobs
buts
blus
myki
vaul
chum
nowg
neal
kasa
gove
kazu
afgh
smst
burr
for-
bera
dieg
ille
sege
perw
idig
nour
cutw
poco
crib
dega
eyeh
lich
seam
365c
jaba
jetb
kook
tous
jobj
usfi
pedo
coca
dezi
pila
pada
wett
weta
ihea
beve
splo
astu
towa
actb
eatr
ispi
ispa
doge
cupc
mamb
mose
fins
vini
sadd
fanr
twen
bilo
itco
sump
howp
ampe
sume
bise
myci
curl
mixh
util
welc
ural
vipa
exte
wade
midd
byma
lapo
isto
gorg
zena
ragn
zeng
dobr
gizm
utop
trab
athl
edis
yiqi
tomc
maze
gece
mygu
al-b
offd
apac
luma
ales
lail
dayr
haoc
my-i
my-p
0451
anyn
zoob
atth
hami
isal
towi
123r
baku
jorg
bate
namo
gapf
100k
alad
alat
serr
klin
inka
gull
woon
joyg
hisf
casp
beya
blok
xinx
aion
sard
hemo
sonh
tobo
rati
nene
akro
geki
cupl
usee
cupr
aimb
klub
rost
empe
empa
empr
medo
weis
jimh
ganj
gani
pre-
foli
itsu
geot
cais
elta
elte
dope
dopa
ames
gavi
mytu
elka
sica
sobo
usbo
atec
ater
win-
sede
arqu
whil
legb
ussa
kays
igre
simu
busk
clem
lotw
lotb
debe
dot-
seor
vigi
bizm
bizo
bizp
yoko
acon
0512
calt
cald
mad-
leem
edon
gowe
dokt
pela
avit
itdi
dido
iluv
plen
mrst
wayr
beha
ibra
domu
brod
clav
grun
tuba
nook
tini
adal
norg
hasl
hasf
goso
hugo
dede
mumb
hotk
krea
alse
1688
lasv
thot
thos
funy
rowl
360m
biri
bira
admo
fit4
apla
doce
pigg
cest
arec
gayr
tapi
endw
sitb
lesa
haul
deso
upli
putr
hual
huai
yili
hedg
nogo
gofa
nutt
adin
siva
upla
logw
loge
tunn
csha
skym
rung
bier
erec
sust
tono
vaga
1818
drip
arar
mayw
bahi
tomh
mola
gigg
bure
inlo
chib
opto
yami
jens
tamm
teke
tinl
onlo
joli
layo
mon-
rubb
neof
neot
yaki
wiza
diel
ared
poon
gowi
vere
verg
wiis
efor
anad
recl
aile
kimm
llam
lomo
hime
nino
albo
gasd
bugd
sadc
labr
inhi
gren
arki
cavi
alt-
ikon
napo
imac
shav
drak
tehn
ecoi
isar
waco
meer
farn
iren
modh
sose
hehe
mywo
konk
hads
gust
mirr
joyn
hiti
saro
losc
homa
usah
usal
usaa
muda
inbu
eve-
telu
calb
oled
shag
bonc
aidl
cumb
dawg
seeo
bobe
hida
sait
enzo
agoo
my-m
erra
bero
die-
skim
myim
perg
pern
pinp
raym
guya
guyw
ouri
giro
yuri
bobm
dasa
betu
dico
tom-
365f
jobz
artj
coch
azer
thez
e-po
eval
eget
sunv
sunu
diao
inwa
asti
rafa
actt
samg
gooo
cupt
ager
hike
alom
alot
retu
laos
itca
keye
puck
flug
tinm
laro
shyu
biso
myce
isai
velv
vipg
limb
oilr
wowl
xxxp
seod
pota
laye
bien
beav
newu
hube
viro
deed
seti
aban
abas
sofo
trau
coke
sued
derr
moxi
dera
tain
musu
amit
vite
diye
maxf
kaki
beep
lepo
yung
9999
cory
algh
fera
sixm
bidu
bidn
noda
spad
nona
123w
123p
bego
leva
aper
daba
gapp
100c
ken-
serc
surr
jizz
trom
kyot
toco
joye
ranf
way2
pols
polk
seap
tupa
adst
acec
oddm
rath
ispo
flai
incr
okan
teki
twel
kinn
danp
coto
ingl
solt
ravi
e-st
loww
maja
ceci
piro
dile
dild
lvyo
acil
yish
koso
itsn
rap-
its-
elim
elin
lech
tray
geon
clik
dens
dopo
refu
getu
0551
drde
gutt
kero
mitr
madu
podc
edub
lora
delc
delp
saye
sayd
culo
atet
kidg
figa
mcco
tyra
vare
vara
taoy
wasw
legf
koca
cam-
alai
camm
caml
busm
busd
herg
debb
uslo
dhar
dotr
dotp
sixh
bizl
jans
jani
layt
xunl
troc
rook
ordi
melt
ruta
lida
kond
torc
avio
kran
mimo
bitg
bitu
0797
notd
myni
manj
defa
tind
sumw
darm
tinn
ushe
yiyi
jaja
iamc
laug
sanh
otok
leis
hask
razz
gosu
xxxm
atmo
guam
kado
lass
lash
esth
simc
dhan
feri
atol
impl
chak
tarp
rewi
byro
hyde
bire
boxn
emot
lubi
evro
fiti
catg
gayd
vacu
lilm
onsa
plur
smsc
e-la
taps
lotf
mutt
yasi
bylo
endn
rami
nado
myfu
tatu
covi
lund
putl
dava
sogr
oldd
cheo
toll
noga
dizz
coho
vete
fixo
angu
dogn
vase
czec
brag
conp
evis
hant
aras
mayg
bori
sixa
dewa
trub
whyi
jong
bedh
frut
amle
xpre
alfo
coss
cosp
djsa
cnsh
bigu
bigk
advo
chig
yara
droi
wonb
kere
quak
hsbc
gobu
slat
anco
befi
ambu
sket
prad
hend
lith
neog
neom
poma
kule
yaka
wize
asam
lols
skid
himi
nint
gaso
gase
gasr
erol
erom
hela
jawa
rawe
kard
hips
hypo
viah
kins
bots
imam
fedt
tico
somi
stef
sico
tada
wats
dran
weha
vins
genx
ecoo
nare
modg
modn
regg
envy
hada
teno
elha
magu
mago
ownd
ownh
fido
fide
hesa
dosu
roti
jins
jinz
gail
jin-
nega
bayb
bayi
asid
myph
haza
cal-
givi
mp3g
decl
wene
noch
esha
arpa
smsb
zack
petd
askk
tany
canu
kase
nudi
it-s
bolu
wepa
sube
fory
medb
zen-
mild
ski-
itea
achi
gron
nect
cutm
led-
our-
ceti
mayr
sult
365e
repi
seag
seaf
bagi
inki
grif
nosa
cabu
outn
syma
anse
wetr
diat
tiao
inwe
brom
amri
actd
sodo
pcre
taho
cupf
ageo
gopo
ress
meiy
meit
hika
alos
bila
how2
liby
darb
lark
boyw
leno
cstr
toti
qiao
joeb
joeh
bode
oilo
abcl
insh
adli
mato
alqu
mido
seof
seok
laps
lapl
seba
isth
kito
swal
upst
ccrr
apor
siwa
dobo
ohan
vena
tax-
setu
abad
uswi
tomd
mazd
lynn
flie
illb
geck
hatr
bluf
al-h
bech
spyc
alea
vadi
fret
haom
haot
nofo
expi
maxo
maxb
gloc
mixr
baiy
tass
iper
bose
bosc
myex
myep
myes
myea
myec
beec
tofi
joos
aish
zoop
zoos
corv
sepi
taha
sept
go-s
lagu
inme
nodi
tees
123m
mehm
dev-
dira
dabo
teri
gapa
gapw
kena
7788
ammo
acci
frit
itun
woot
e-wa
ifli
hisd
hiso
hisw
raha
poto
whea
detr
mool
scro
aceb
adse
acep
oddi
diss
icar
atpa
gunw
tags
edel
goan
incl
upfi
loll
fiat
dind
dinn
esan
schr
igor
amag
gila
daik
psp-
barj
ingo
0597
0592
cepa
dvdp
weat
weid
orli
itri
brut
shei
itla
redv
iner
heil
0574
aydi
rapa
eliz
trax
naug
wale
alya
sopa
mytv
idro
gapl
madn
pods
sask
delh
spie
ibar
kidk
dryd
arga
dryr
wonk
varn
4eve
myja
kori
oldo
hays
trey
onta
camt
camd
prit
busn
lotm
lotc
suan
kroo
ezee
bizr
0510
jano
pesa
laya
layp
eloa
rola
seol
a7la
siga
boco
snor
muab
requ
itdo
satd
exod
didt
coba
plex
pune
bit-
emen
whon
guze
0791
notg
knee
badn
linh
behi
dech
elab
slov
roby
tryr
gouw
man-
gist
brot
bewo
xion
sumr
e-bu
usha
lent
rept
iamn
hasp
mors
dool
meha
juve
xxxh
hern
eton
fior
upho
kada
nift
mybr
mewa
rofl
leop
sinf
eduv
edum
mota
audr
elea
siro
vipd
biov
chae
itba
chaw
upma
migr
djda
babb
babu
benp
issa
wris
apli
cesa
psic
shus
alho
kutu
lilp
saik
avar
reit
ramo
maur
ripe
pham
powa
lesp
lese
lesl
zips
huah
ciao
dazz
dazh
tano
fogg
lacu
chey
tola
seot
jabo
bano
anga
sewa
aldo
whal
runo
icli
adtr
braw
brab
hanm
kalb
arat
mayn
onst
sixr
cede
web1
molo
mire
gigs
humb
frum
tapt
anxi
chiv
lela
wont
wona
keri
wonl
e-vi
kano
kanb
reid
tamb
pomp
nala
aini
nile
monr
auri
suse
soil
rear
drmo
raul
e-te
aref
diea
areb
areo
arep
nerv
wefi
4you
timi
vern
toyt
adra
fabl
dunn
dund
swor
teaa
kung
sige
eddy
drca
asce
karu
kark
macm
cebu
juke
aimi
kios
imar
ropa
""".split()
for index, name in enumerate(POPULAR_PREFIXES[4]):
    PREFIX_SCORES[name] = (8000 - index) / 12000.0
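# Minimal usage sketch, assuming PREFIX_SCORES is meant to rate candidate
# names by their leading characters (an assumption about the surrounding
# module, not something the scoring loop itself enforces):
#     PREFIX_SCORES.get("kong", 0.0)   # "kong" is in the 4-char list above
# Using .get() with a default covers prefixes absent from every
# POPULAR_PREFIXES list.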
POPULAR_PREFIXES[5] = """
china
green
super
video
inter
hotel
power
dream
media
music
poker
world
photo
money
radio
happy
black
sport
games
movie
smart
click
local
great
cheap
daily
flash
studi
image
think
house
forum
brand
cyber
stock
trave
first
group
quick
style
mobil
onlin
magic
globa
share
total
porno
trade
print
plane
clean
adult
event
party
metal
hello
speed
fresh
phone
apple
water
lucky
price
angel
crazy
watch
start
about
desig
solar
earth
clear
space
model
trans
digit
visit
ideal
simpl
sales
prime
micro
white
grand
ilove
heart
class
rapid
learn
light
sound
point
truck
serve
stone
searc
urban
legal
value
smile
trust
royal
track
healt
horse
small
study
funny
extra
sweet
enter
store
india
anime
glass
drive
elite
sugar
grupo
north
night
domai
deals
david
links
under
touch
obama
chris
enjoy
paper
build
seven
googl
drink
porta
right
cross
books
handy
homes
brain
south
guide
credi
three
metro
energ
gamer
vital
plant
realt
final
iphon
human
fight
girls
color
ultra
casin
organ
river
press
virtu
shops
every
asian
logic
order
compu
ready
pixel
train
short
natur
alpha
elect
unite
medic
multi
young
forex
after
table
match
stick
dirty
state
direc
times
texas
promo
frees
today
offer
futur
solid
ideas
cover
korea
brown
marke
voice
ameri
sharp
cards
there
index
anima
sleep
wheel
publi
latin
blues
creat
fancy
email
faith
label
names
block
basic
info-
cloud
offic
maste
motor
beaut
proje
speak
kings
child
goods
skill
techn
trend
place
docto
visio
where
celeb
audio
europ
turbo
linux
never
steve
break
james
peace
stage
parts
astro
ocean
level
sites
proxy
team-
cycle
taoba
large
paint
quote
coach
joint
teach
japan
still
leads
vegas
villa
servi
club-
silve
robot
truth
loans
chang
disco
profi
catch
renta
graph
marks
secur
centr
socia
free-
floor
frame
front
flowe
famil
clock
ebook
hyper
lives
infos
intel
loves
route
peter
round
warez
steel
count
board
infor
terra
baidu
tiger
schoo
thema
sense
score
staff
inves
freed
taste
women
pizza
clubs
stand
finda
frien
saint
vista
golde
check
alter
files
stuff
blogs
lands
compa
faceb
safet
whole
heavy
inner
love-
lovel
dubai
pharm
brian
autos
datin
bears
force
scrap
shift
teens
mundo
hands
bests
eagle
grace
shell
insta
cosmo
field
activ
omega
forma
album
death
broke
close
beach
quiet
court
islam
theme
major
scene
andre
thing
safer
orang
shopp
laser
plays
alarm
pussy
indie
jason
guard
reach
agent
eroti
frank
freem
carbo
crash
films
pages
auto-
salon
sesli
grant
trace
insur
hosti
haber
lover
codes
dance
bella
condo
shoes
lunch
notes
lovem
teams
nutri
story
found
trick
bestb
stars
scott
sarah
tekno
marin
miles
your-
freet
intra
spark
campu
whats
caree
novel
howto
charm
franc
paris
minds
candy
bette
coffe
youth
stree
lovet
freeb
storm
retro
jewel
prote
split
fashi
woman
shine
admin
infob
busin
kevin
finan
lifes
littl
sunny
custo
tokyo
premi
fanta
swiss
teamt
gameb
write
chain
trial
early
march
trees
singl
windo
shoot
finds
chaos
which
cinem
refer
shop-
tripl
linda
acces
waste
union
nasty
shows
colle
livel
maria
alive
focus
bestp
sells
freep
roman
rooms
trash
comic
foods
dates
xtrem
lotto
shopt
freec
karma
petro
artis
banks
hydro
flori
honey
silly
innov
loveb
gamep
chips
easys
justs
carol
spots
lovep
diamo
laura
blood
troop
afric
amber
calls
delta
angle
chill
chair
naked
lapto
opens
jesus
plain
globe
lawye
weare
charl
herba
craft
besto
rainb
title
angry
thene
shopb
kelly
danie
yello
perfe
empty
bonus
peopl
georg
third
porn-
drago
inthe
lines
queen
gamet
bestm
bestf
best-
shopf
tecno
miami
looks
other
syste
easyp
doors
sight
being
autom
lyric
equal
nokia
netwo
clubf
sanal
canal
funds
chine
exact
archi
shopm
bluet
doubl
ghost
live-
limit
londo
infot
olive
scale
justa
swing
shopa
yahoo
easyt
spare
guita
heads
santa
reals
fines
selec
infop
goodb
home-
besta
weird
thera
range
simon
luxur
shang
hosts
easy-
themo
wonde
going
thats
panda
upper
topic
firef
noble
theco
infoc
socce
sprin
shape
justb
justm
trips
gamef
adver
bestt
signs
moder
hoste
liqui
patch
these
pearl
loved
sexy-
biker
quant
cares
yours
zhang
talks
chrom
strea
helps
teamb
sohbe
newsf
clubt
freef
exper
gifts
marry
pocke
iwant
rough
realm
prove
livep
farma
blogg
cools
holly
thebi
atlas
teamf
escor
gamec
bestc
freeh
freel
aaron
fligh
smith
blind
easyb
radyo
tight
puppy
shopc
purep
saves
secre
cargo
autop
maple
sonic
andro
asia-
tatto
sushi
hybri
views
carry
justd
insid
lovec
works
trueb
neuro
shopl
blank
alien
giant
easyc
priva
cable
blueb
bring
garde
valid
habbo
linke
lists
wealt
downl
thank
shark
justt
newst
habit
game-
bingo
stops
posta
theca
bills
fastf
manga
visua
picks
cryst
euro-
star-
wills
treat
livin
webca
excel
theba
walls
parks
homel
newsb
capit
brigh
summe
life-
camer
easyf
fasts
actio
shoph
plans
coder
marti
viral
reali
lovef
tooth
moves
advan
ninja
drops
bodyb
yourc
petit
gross
torre
needs
realb
justf
spell
eight
chica
clubp
bestr
signa
freew
ticke
cityb
cityt
backs
dealt
swift
tutor
whose
conne
hobby
bookm
irish
boats
cruis
cells
thebe
psych
maybe
solve
maroc
chart
bosto
bethe
fixed
homet
noise
newsp
clubb
bestl
lifet
calle
citys
websi
steph
perso
devil
quest
bookb
forth
rates
reall
loose
milli
float
mailb
softs
truly
polar
fulls
juego
thewe
golds
porns
tenni
datab
datas
yourb
facts
massa
rocke
youtu
jimmy
homer
newsa
clubd
techs
oasis
coupo
choco
piano
hostm
tango
emlak
goodp
persi
bluef
chase
feeds
ratem
showb
fruit
norma
livet
uniqu
playm
cause
kingd
theho
penny
conce
thelo
infom
merit
claim
prima
gadge
macro
carpe
bible
busca
italy
justp
teama
homef
gamew
tests
sourc
uploa
safes
poste
matri
trues
easyd
wells
goodf
flirt
bluem
guess
makem
dubli
myweb
showm
bling
craig
euros
artic
whata
grind
cheng
foodb
chess
saudi
mortg
lease
cente
jobst
horny
lovea
homeb
gameh
darks
spiri
knows
sheng
billb
hostb
missi
tript
talen
nurse
aroma
opent
chile
lotus
domin
youre
steam
mails
fires
gamea
monte
banko
rando
livec
theha
flora
cool-
yourp
senio
maxim
carto
infol
grafi
funky
thebo
dolla
justl
goodm
atlan
linkt
namep
hight
homed
chick
miste
diana
clubc
clubm
battl
lifep
freea
ships
allin
showt
niche
mango
mondo
bluec
pound
gamem
autob
kaixi
items
award
bound
softw
firet
comes
pures
euroc
stude
juice
lived
liveb
linkb
twist
optim
dataf
plast
carte
whati
titan
ladys
built
purpl
parad
knock
carlo
teamc
billy
strat
karen
judge
besth
posts
hairb
golf-
vitam
cardi
lifel
divin
cance
words
hostp
jenny
easym
fastl
alice
crown
adopt
goodt
thumb
niceb
siteb
wants
codeb
makes
autol
crack
saved
greek
reald
realp
takes
coral
begin
eurot
starb
victo
bankr
fullm
bridg
dalla
suits
playp
playb
linkf
quali
prett
yourm
yourt
infog
adams
noteb
traff
justc
myspa
nameb
trail
baby-
newsh
talkt
banda
clubl
chuan
yacht
besti
geeks
numbe
callt
thepo
cityl
findl
bloga
demon
teamm
prest
ringt
denta
shopr
bluep
educa
owner
roads
consu
smoke
guild
phase
resto
infoa
hawai
pureb
purel
envir
homec
siste
syria
marco
boots
arcad
basel
joins
cityf
revie
bodyf
overb
thela
matur
clan-
markp
ladyb
socal
justi
teamp
linea
homep
faces
glory
julie
ebony
scout
facef
gamel
suppl
truec
joker
lifec
gamin
tower
freer
henta
tasty
thepa
findm
micha
dealf
egypt
blogm
relax
easyl
honda
skate
costa
imagi
shopn
shopd
palin
bubbl
booka
mega-
megas
autoc
doing
extre
softa
thest
salem
xiang
macau
livea
playl
timet
muzik
kingf
willi
phill
likes
buddy
gold-
polit
woori
mores
conte
memor
liber
mikes
itali
safeb
highw
babys
loveh
babyb
prope
teaml
teamh
gameg
econo
seeks
orion
facet
uncle
techi
beste
theta
islan
might
findf
brick
deale
easyr
easyh
wellb
bliss
fotos
locks
nicef
groov
voter
roots
blueh
since
picka
openp
jacks
firma
bookf
sexys
spain
incom
realh
gofin
explo
monta
fullb
cashi
playf
passi
flexi
opera
bodys
progr
datat
coast
overs
avant
dutch
sunse
backb
wired
contr
good-
newss
everb
evers
gamed
truel
sellm
later
cheat
lifeb
tamil
calla
onlyf
minic
lower
canad
fileb
lemon
users
billi
twitt
josep
filmb
goodd
sitep
tripp
sures
codet
beyon
commo
chief
indir
rober
onthe
softb
provi
forms
play-
sheet
nextf
fullt
fairy
liver
filma
fisht
youxi
playe
linkm
bearb
mindt
kingb
kingp
broad
astra
cellb
blogt
blogc
downs
infof
indig
mixed
moreb
suite
impac
conve
scrip
justh
drama
goodl
namet
highs
labor
teamr
turki
homeh
terry
tampa
cityp
footb
cluba
tech-
spend
ideab
therm
barba
citym
jumps
trueh
mouth
alask
verde
keeps
wikit
missm
misst
shopi
thego
suret
alert
namel
girlf
webde
secon
bookh
ringb
merca
realr
genes
rebel
mains
maine
jeans
overt
puref
throw
nancy
issue
webho
10000
fishf
kitty
playt
linki
brook
weste
tunes
kingt
blogo
stroy
infin
bodyp
helen
mario
datap
youra
facto
theli
markf
amazo
germa
whois
talkb
sides
bloom
wallp
entry
spice
teamd
easte
catal
choic
clube
sends
carda
sellt
summi
paula
shopo
musta
susan
cityc
minis
softp
compr
peach
dealb
apply
hards
truef
unity
onlys
fundr
fitne
resum
fastb
austi
pinoy
machi
justr
westf
missb
tripf
dress
bluel
pirat
bookt
bookw
autoa
bilgi
allst
reale
grave
steal
huang
artof
fireb
firel
showe
arabi
grill
fullf
juicy
randy
toyot
livem
heath
kingl
moral
datac
belle
strip
ladyl
porto
token
monke
jobsp
helpm
piper
linkp
facew
fusio
newsc
eastb
metas
drugs
bandb
clubh
techt
bestd
bestw
postb
valle
thegr
grows
busty
seoul
sellb
deser
berry
freeg
everf
brief
minor
iklan
shado
thehe
citya
johns
filet
hypno
hardf
perma
camel
hostr
spoke
easye
fastc
faste
hotma
fotol
webma
cooki
filmf
outle
stron
disne
allen
sitet
sitel
sitem
thear
missf
fours
thega
ortho
blue-
murat
blond
drunk
longs
racer
longh
error
robin
hipho
longf
wellt
openh
openb
open-
codef
tools
fortu
realf
loveg
blast
softl
caref
month
trunk
prost
westb
bankl
fairs
thecr
cashc
stamp
lasts
playa
playh
chann
hydra
handb
pools
timec
scree
buyer
kingh
kingc
cellp
cellf
churc
datam
letsg
tiany
growt
overl
overc
yourh
zhong
thebu
woool
punto
rains
rocks
wildp
landl
wallf
hopes
helpb
safec
bolly
named
babyc
wicke
areas
mafia
choos
danny
bands
moref
leadb
cardb
carde
ables
lifeh
freev
vegan
city-
findb
findp
minim
fastt
freak
shelf
truet
web-s
onlyb
onlyh
hostf
hostl
hostt
cherr
themi
beiji
goodc
polic
anti-
pager
diver
sitef
sitec
pinkp
coded
missl
bluea
girlb
easya
surel
decor
eshop
opena
codep
sexym
ciber
expre
votes
galax
lovew
softf
inspi
showa
engin
bursa
caret
pure-
purec
fullc
liang
finef
fairb
fully
fishi
linko
natio
link-
verys
virgi
equit
stati
aqua-
thena
shame
coolb
infod
markb
carre
filef
datef
landm
newyo
chari
thefa
thefi
realc
jobsa
goodh
lovek
homea
talkm
engli
julia
chuck
darkb
tanks
hairs
pilot
mailm
callc
known
findt
cheer
proof
truep
proma
hosta
hostw
raise
wimax
pulse
atomi
muscl
alias
jorda
easyw
fastr
bobby
steps
crowd
spide
leave
loads
foodf
cafem
penta
regio
copyb
cream
helpf
fatal
blued
virus
bytes
fastm
longb
cigar
aloha
openm
openl
openf
openr
salet
hells
salef
saleb
frenc
audit
telec
porna
bruce
carib
textb
maill
hugeb
formf
whatb
blink
thesh
puret
bankc
banke
bankt
thech
liveh
fishl
farme
linkw
linka
linkl
handl
timer
timef
fores
mindf
kingw
blogi
blogf
hotse
everl
shirt
milan
manda
bigba
datal
datah
mynew
infoh
marka
markt
bazar
mommy
sureb
paras
chatt
ladyp
derma
weddi
paren
getti
thedi
namem
ecolo
wildb
scanb
teamo
newsr
newsl
evert
venus
deepf
toxic
salsa
facep
leadf
nexus
stell
stuck
circl
cardl
sella
ideap
killb
crush
textm
silen
thepi
findi
deall
dealp
unive
galle
budge
tommy
bigti
hostd
hostc
denve
sunda
digis
prize
musik
types
selfs
circu
easyg
fastp
fastw
wellp
foodm
nexts
logos
foodp
opend
nices
pageb
keyst
bomba
siter
edges
mount
plano
awake
towns
shall
legac
krist
balls
comme
megat
smash
cedar
avoid
saver
savem
conso
jacob
repor
reala
real-
parkb
gener
dynam
showf
shock
janet
comed
comet
thesi
hacke
orego
bankb
fineb
finew
fairl
macao
larry
fishs
orbit
chanc
clark
amate
timed
kingm
willf
thehi
likel
succe
cellt
blogb
sorry
colds
weekl
bodyc
mini-
datar
brazi
nicep
overf
teeth
markl
races
agenc
scien
westc
wests
rainf
landw
wilda
landb
moret
distr
conta
ladyf
facel
pink-
paran
jobsf
jobsc
safew
namef
namew
highp
highf
linep
wilds
lovee
parkw
areal
raven
worki
webco
everw
truem
monst
expor
darkp
leade
postr
ideat
falls
jokes
lifew
lifea
lifem
shopw
freen
thero
fetis
mento
callf
callm
actua
johnn
filem
backt
hardb
lookl
onlyw
thisi
funda
woods
longt
wellf
treel
aucti
auror
chemi
filmt
tetra
primo
sitea
tripb
tripa
kuwai
touri
wines
thedo
longc
input
openc
copys
codec
slick
twins
booki
bookl
bookp
bookr
ihate
loanm
loant
boatl
feeda
forte
busyb
babyt
diese
parki
senda
softd
firep
firew
shown
rescu
amero
comer
celti
thesa
idaho
syner
bride
mycar
livef
livew
optic
testb
thewi
lastf
morel
playw
clari
slide
timel
bitch
newco
partn
youpo
cellw
blog-
ventu
foodi
tells
marie
letsp
coolc
youri
laure
bites
viewf
thebr
squar
sigma
filed
westl
landf
chat-
agora
emily
needa
rated
verti
just-
helpt
pipes
nameh
highb
wildc
parke
turke
homew
homem
prosp
flyin
newse
eastm
eastf
pride
kitch
deeps
legen
clubw
darkt
leadt
reply
hairl
amuse
sassy
gives
cardp
diabl
cafe-
scuba
produ
tribe
grass
flick
alpin
fiber
cityw
findh
softc
starc
lookp
their
truew
champ
pimpm
webst
entre
gator
birth
maint
stays
justj
deadl
nicet
costs
lifer
triph
resor
ghana
harry
shopg
girlt
codeh
codel
codem
carme
sexyb
autor
loanf
spott
savel
saveb
above
boatb
sakur
bikin
newma
hills
textt
getst
getsh
mailt
softo
mobic
widge
firec
fired
teamg
careb
fourb
theso
purew
gameo
starf
colla
cores
pacif
phoen
feelf
cash-
thewa
qatar
lastl
fishc
heats
playr
playc
linkc
treet
poolb
sandy
timea
timeb
lista
screw
willo
parti
viewb
softm
barte
liveo
cleve
surve
theno
journ
bodyt
data-
folks
letss
coole
infon
heres
proto
sitew
rally
buymy
patri
wildt
talkl
talkp
farms
walla
outdo
thefl
winte
centa
deskt
somes
gooda
cotai
safem
highl
wikip
highc
wildw
cabin
teamw
teamn
castl
casad
getmy
peaks
civil
showc
darkf
techm
haird
thegi
cases
thede
cardt
selle
sellw
kille
freeo
evere
calli
roadb
textp
thepr
the-m
findw
backp
winds
halfb
shipt
billf
telco
gypsy
mines
divas
fast-
rings
ringf
login
desti
sampl
viagr
filmp
deads
antiv
arche
edgef
sexsh
manag
fives
missp
bluew
headb
topse
suref
racef
longl
salep
ballf
bikes
movet
crisp
kiddi
loanl
loanp
teles
savec
const
voteb
pornt
spect
petra
louis
realg
carma
sames
cloth
mailp
mailw
mailc
soft-
mainb
nextl
nextr
carel
amand
bryan
starl
saleh
arena
taket
evens
elder
cashf
cash4
feels
reads
older
nikki
donna
versa
infra
nevad
dropf
kenny
treep
liste
prepa
king-
sofia
wildf
cellu
cella
foodh
foodl
foodr
bodyl
dataw
coold
coolf
pictu
overd
yourl
racem
russi
yummy
markh
amazi
thins
magna
datep
datel
ultim
barra
doggy
futbo
prodi
wallt
hispa
princ
kathy
goodi
safed
scant
scans
nitro
newso
newsm
tweet
jooml
flame
rides
tibia
facec
facem
garag
madei
apart
foots
adven
techp
postp
postf
hairt
hairf
mysho
selli
sellf
mater
diabe
sellp
ideam
aspen
pureh
freek
freee
flats
callb
songs
texta
textf
cultu
spafi
cityh
findc
annie
minin
minib
flowb
athen
dealc
netco
dealw
lookf
lookb
lookt
billr
thoma
artre
theto
makin
prome
pasta
zebra
targe
foodt
arrow
adapt
ahmet
ringl
chili
fabri
renti
alone
filmc
goodw
recor
nicel
lazyb
pagel
paget
roger
pinkc
integ
agree
tours
ajans
harri
signf
bluer
girlw
gazet
headl
voten
theda
rekla
openw
bizim
maket
balla
snack
roomf
izmir
loanc
bigre
spotf
savet
votew
pornp
belly
feedb
scand
ratef
rateb
megab
realo
carin
drawn
diner
turns
softh
hears
heard
whatw
formu
nextb
nextc
nextp
thesp
purem
eurob
rules
telem
starw
moved
taken
hurry
sneak
banka
bankf
finec
fullh
daisy
treef
treec
lastb
lastp
getfi
fishp
farmf
morem
answe
playd
dropt
bigfa
outer
winea
ports
baseb
tuneb
mindp
mindh
mindb
kingr
bucks
thehu
candi
goldb
sevgi
downt
downb
bodyw
datad
exoti
meets
kinky
massf
infow
marko
useds
insan
thanh
viewt
tropi
vogue
vinyl
carri
gotta
thebl
dateh
votet
buyme
landp
landt
landa
landc
newli
plusp
plusb
mylif
ladyt
portl
jobsl
helpl
safef
clima
sunma
buyse
parkf
empir
redco
termo
deniz
homeg
gamen
chico
news-
everh
everc
everp
seekl
seekb
henry
thein
getme
aplus
gamez
pornh
bandh
footl
footf
clubr
darkm
techa
bambo
leadl
postl
postc
colin
basec
panty
lifef
shop4
shopk
shope
hunte
bigma
kills
stopf
flatf
callp
barbe
famou
hangs
hanga
the-c
gains
tende
johnd
backf
dealh
dealm
onest
lookm
bille
holid
amigo
onlyd
surge
theor
showl
fasth
fasta
fotog
foto-
warma
viaje
fooda
filmd
filmw
nicew
massb
pagef
alsha
moons
teen-
fiveb
ilike
glamo
shop2
herbs
hotte
bluen
girlp
artes
movil
surem
longp
erica
makea
capri
ballb
booko
banan
sexyc
autot
crist
roadr
megad
yourw
doyou
parkp
shout
texts
hocke
takea
bravo
weblo
showo
breez
mainc
nextm
nexth
nextt
fourt
sophi
eurod
meeti
banki
bankh
finet
finep
fairp
heave
noval
boost
redst
packs
readl
the-s
shish
handc
winet
timew
listp
listl
listb
veryf
westo
golfs
tuner
tunet
willt
regal
ipods
blogr
sinem
eastw
flore
foodc
weeks
downf
bigbu
bodyr
zero-
deai-
sunri
letsf
growb
cooli
overh
sexpo
jones
markm
wwwca
surep
rabbi
viewl
carro
westm
ineed
datea
eleme
lando
spicy
talka
morep
plust
ladyh
sharo
notep
notel
desks
justg
juste
justo
justw
helpw
safel
safep
linec
lovei
babym
babyl
rache
parkc
teami
siber
testf
sandi
baker
easts
eastc
seekt
usedb
taylo
motel
nordi
travi
inver
metac
faceh
trini
footp
ditto
adria
clubg
techc
leadp
bestg
yougo
posto
postd
wwwma
dogst
cardh
cardf
thetr
nette
moore
lifeg
seema
flatb
telev
abbey
remot
react
dayst
findr
packf
flows
wanna
johnb
filep
chiro
remix
hardl
shipb
billp
smoot
gossi
promi
onlyl
onlyt
miner
keepf
vietn
themu
suppo
hallo
attic
filmi
filmh
filmo
filml
filme
deadp
deadb
quite
almas
draft
barst
nicec
sited
pinkt
costc
geniu
newto
websu
fuckm
hotto
senso
planb
planm
signl
headt
headf
zerot
headc
sexha
omaha
wayne
bulks
townl
copyw
copyc
holes
tulsa
ballp
newte
tempe
templ
megan
mille
autof
autod
roadp
coreb
jessi
rural
savea
votef
allse
acade
realw
yourr
harmo
jerry
bunny
stepf
softt
softr
whatf
showh
thequ
finem
comel
carea
purea
stard
stark
starp
corel
takef
bankm
bankp
finel
fineh
fairf
talkf
jamie
cashp
cashm
treeb
netlo
slash
xango
fisha
mored
farmp
bootl
means
cosme
dropl
dropp
handt
listm
fuzzy
veryl
passa
avata
stats
mindr
mindl
mindc
sexto
cande
cocoa
bloge
falco
aquas
coldb
getpa
downp
sexvi
gospe
bigbo
scanp
letst
zone-
quint
buyin
coolm
yourd
clien
azure
whatp
cobra
pitch
viewp
westp
hidde
datew
dateb
sunsh
pickt
carpa
safar
talkc
plush
needt
needf
portf
compl
weigh
thefo
paypa
flagf
flags
jobse
jobsi
jobsh
jobss
lotte
carla
highe
linet
linel
curef
love2
loveo
buysh
areab
teame
tripc
walke
tyler
gamee
prose
testa
teste
testm
wowgo
noisy
newsg
easta
asset
timep
mysti
inven
jerse
facer
hotca
clubi
mexic
5star
darkl
darkc
melod
krazy
posti
miche
stopa
flyer
givem
cardo
cardc
sellh
ideaf
ideag
ideac
lateb
lifed
lifeo
shema
free4
sexma
theri
loopt
find-
cityr
findd
finde
topla
topli
minip
thatb
mobis
lovin
shaba
plaza
filew
filec
looka
shipf
billc
chipp
manya
truer
april
formo
onlyp
hostg
hosto
pasto
brush
thist
fireh
below
rusty
typef
taiwa
proca
fastd
wellh
aquat
denti
kenko
goldc
hotpa
lockf
lockl
lockt
goodg
muzic
antic
boxin
nicer
lazyl
pagem
pagec
pinks
edgeb
carst
missh
missd
alban
votep
blaze
nomad
jiang
blueg
guest
headp
byteb
bulkl
raceb
longw
townb
racep
nomor
melon
katie
openi
openg
erics
sideb
stopt
proba
stopb
nearb
baoba
allow
bookd
tempo
megam
false
sexyp
babes
fullp
maggi
payda
room1
allma
loanb
loana
spotc
grati
pornb
mania
speci
feedm
feedt
overm
ratep
terms
ident
parkt
sendm
turni
tranc
firem
showp
carep
caren
netde
fourl
eurol
cisco
hackf
starv
core-
coref
coret
messy
rogue
sydne
finer
fulla
morga
fleet
readb
readf
readm
lastd
fishb
buyca
tubes
morec
baron
panel
cosmi
linkr
linkh
handf
handa
handi
handp
musli
spotl
bearp
newca
baset
hitst
wildr
mindm
minda
sextr
bigdi
refle
glenn
toplo
nicol
celld
asias
wuhan
hotst
bodyh
zeros
girlh
girlc
tellt
boise
ether
signp
nicem
grown
growl
airma
agile
mynet
thele
thund
marki
compo
tibet
kayak
surew
vieww
zombi
carra
fills
dated
ateli
haina
buyma
param
sudan
lions
chats
meili
carpo
again
ninef
plusl
ladyw
wallw
chars
deskb
guang
jobsm
jobsr
seria
hopet
helpp
objec
namer
olymp
linew
linef
twice
wildm
scanl
parko
outpa
testp
gupia
bidst
bigst
usedf
essen
winne
canta
bearf
deeph
deepl
deepb
redma
diala
impro
darkw
dark-
leadr
postt
posth
hairp
vivid
batte
sendb
cardr
latel
somet
colon
coupl
parsi
pushp
menta
knowt
myown
the-d
cityg
cityd
divor
biote
johnc
filel
xinyu
deala
tripm
hardw
billt
usedc
tecni
combo
cellm
gastr
pedal
nanoc
nanop
host-
onlyc
thisb
alexa
funde
digim
keepc
proce
websa
selfl
mybes
corne
wellc
someb
cupid
ringp
resta
oscar
dogma
thecl
goodr
film-
timem
keepl
masst
massp
massi
pagep
keysh
siteo
siteh
site-
boome
sidef
lostl
misss
skinn
tripw
walka
wwwse
hours
prior
otaku
planf
girly
mypro
racel
longd
pickp
intim
codeg
lostb
losts
sider
firmb
twink
newtr
makeh
makeb
makeu
booke
haven
sexxx
ishop
lloyd
auton
youle
antar
mover
gothi
wikis
roomt
loand
roadf
roadt
savin
orien
telep
stunt
savep
pornm
pornf
votel
boatc
boatf
eleve
youro
megac
babyf
lovey
lovex
wides
caffe
artma
norca
maila
mailf
detox
mail-
megar
turnb
triad
mobit
softe
lucks
birds
firea
showd
baran
sheep
nexte
nexta
sexlo
bigli
biglo
cared
purer
eurom
starg
starr
toner
movel
movem
salel
corep
corpo
ahead
samar
takem
fined
aussi
casht
cashg
cashd
engel
savvy
lakes
shiny
fishe
frost
fishw
heatb
morer
playg
playo
manti
dropb
dropc
ibiza
scoot
poolf
globo
etern
beart
bearc
listw
listf
veryb
veryd
verym
mothe
passt
passp
based
surfs
joinm
golfb
shake
willy
viewm
cando
konto
wendy
eyeso
celll
blogp
asiat
tatil
votem
foodd
foodw
hones
defen
polis
coldf
grade
thaim
youyo
dataa
seekf
mouse
meetw
overw
factf
wings
sexpa
markw
wwwco
allfo
equin
kidsc
viewa
thint
thinl
senti
corre
canna
teent
magne
datet
datem
youku
escap
talkw
ninet
ladyc
peakf
needl
walle
portr
notet
genie
chron
lifei
jobsb
diskb
just4
hopel
pushb
jumbo
monit
helph
honor
holds
highd
facil
funta
buyst
bruno
roomb
homeo
dieta
worka
netra
seekw
toptr
aztec
ridet
metap
rende
forst
deepw
deepc
hotco
expos
bandp
bandf
footw
al-ma
darka
panam
bestk
hairc
stopd
joinf
sendt
sexch
boobs
sendl
givet
jokef
ablef
agest
cafed
darkr
signb
hunts
packa
freez
triba
everd
brasi
calld
callh
narut
textr
voyag
barbi
allth
loopl
netma
flowt
flowl
flowf
chees
jumpl
jumpb
jumpf
raint
backl
rainh
bases
madma
onesa
hardm
hardp
thais
halfs
shipp
retai
junkb
ashle
allco
pagea
minef
lance
kindl
heidi
ecoco
keept
proco
webse
getco
vietc
airli
youse
easyn
wella
ringc
refor
warms
queue
tasks
filmm
filmr
rockb
momen
coldh
deadw
wwwba
lazyf
massm
westw
spart
teddy
wanta
shutt
edget
sexso
fivef
lostp
missa
miss-
khmer
mapqu
onlym
knigh
ponto
shane
koshe
coven
artem
safeh
yourf
raceh
newbi
copyf
hellb
ozgur
recip
anton
firmf
commi
betty
bookn
townf
davis
netse
switc
horiz
moveb
capta
pauls
flesh
mantr
spotw
spotb
carba
saveh
pornd
conse
pornr
coolt
valor
olimp
feedf
ratea
ratel
busym
campi
campa
shouj
hillf
hillb
humor
huges
softg
alist
reiki
newho
nextw
alltr
comeb
befor
carem
thesc
pured
barco
arabs
colli
drift
corec
corea
saman
takeb
aweso
redha
metho
cashs
casha
feelw
likef
likeb
likem
likew
skype
optio
stein
lastc
rolle
lasik
chand
sandr
winec
bearw
bearl
listt
veryc
basef
libra
hitsh
tunep
mindw
kingo
kinga
willm
willb
eyest
cellc
livee
packe
blogu
casa-
goldp
cindy
goldt
avenu
prese
coldt
tella
letsc
ancho
webpa
growf
growa
meetp
overa
yourv
coron
thelu
daddy
lasth
equip
groun
viewc
advis
rugby
sunst
mebel
doggi
chate
talki
canli
prode
portb
houst
thick
chara
charg
cents
somef
scrib
goodn
bored
clips
pushf
cater
caifu
thejo
holde
mostc
youme
buyso
parka
areaf
teamj
faceg
seatt
testr
testw
widef
testl
workb
chich
workt
netre
ridge
youca
newsw
newsd
eastr
seeka
mades
artby
slows
ridep
metam
airst
madel
moose
bande
growm
slowb
postw
postm
hairh
hairm
flyto
basem
wilso
joinp
slowf
webto
buste
given
cardm
cardw
selll
selld
idear
witch
favor
crime
topho
getit
thete
netto
lifen
hepsi
hunti
huntb
seeme
porte
freei
flatl
motiv
shade
allta
quadr
schol
the-b
daysa
findo
findg
theki
userf
userl
winde
thevi
dealr
netca
notef
pleas
onesh
metra
motos
osteo
confi
halft
halfp
lookw
shipl
clove
chipb
piece
richa
richm
sexca
thish
thisp
fundp
henan
racet
pulsa
zirve
blade
lumin
mason
selfb
selfr
ahmed
surec
thait
longm
fastg
plati
bigca
eclip
asked
webre
penpa
expat
jungl
fotom
lucas
gaspa
suntr
taska
remax
shawn
antig
pageh
unico
theas
theat
edgec
carse
edgel
slims
sexsi
appli
wikim
missc
missr
walks
foura
somew
helpc
pulls
silva
hottr
clini
plana
rootf
zerob
youha
bytep
bytef
movin
townw
buypa
airte
askma
welld
afgha
stopl
hiper
stopc
coope
ballc
balle
drill
comma
redbo
snake
jobma
bookg
could
starm
carmi
sexyt
netso
netsh
netsa
ringm
accen
notar
marty
salew
mydog
apoll
roomp
loan-
loanw
joyce
repai
savew
book-
aware
wegot
rock-
newpo
rockm
rocki
feedr
ratet
bigpa
ecoso
agape
morph
draws
drawl
wideb
catha
newme
hillt
barga
getsa
getsc
getse
sexbo
lider
hugel
hugef
betsp
luckb
whatt
forme
magaz
webli
happi
maina
nextg
nexti
butte
carer
carec
elvis
eurof
fairc
overp
bless
stari
satur
movef
arab-
nears
redho
cutie
meetm
fairw
barma
blend
cashb
treeg
redse
liveg
bigho
novat
reada
readw
vande
taskf
lasta
buyto
lastr
fishh
heatl
farmi
farml
certi
bootb
revol
hides
linkd
clair
netbu
madri
netba
handh
handr
poolp
armor
bearh
westr
listi
basep
ethan
joinb
joinl
joinw
westh
statu
willh
wille
partc
phila
webin
saigo
eyesh
cello
using
blogn
blogd
crave
aquac
reade
coldp
coldc
irani
carve
downc
adobe
mando
discs
coolp
meetb
yourg
massd
rever
cartr
infoi
oldma
acqui
femal
markd
markc
markr
wwwch
viewr
keith
thinb
teenp
daten
rockt
lunar
pickl
chatl
funma
morew
betlo
plus-
calci
ladym
cowbo
needb
noted
topsh
vecto
jobsw
justn
climb
nobel
kenne
lineb
toppa
scanf
babya
parkl
israe
yesil
diete
payla
easel
testh
billa
bioni
chung
nauti
emedi
newsn
eastt
everm
nutra
puzzl
getmo
netpa
ridec
beads
faced
forsa
madef
peakp
cyclo
peakl
hotch
clubo
clubv
rhino
harle
clubn
sendf
techf
netvi
postu
haira
loung
caset
keyfi
alway
ideaw
idead
fallf
latex
lates
webte
rootb
carsa
huntf
signm
supre
modem
purse
relia
haosf
roses
grupp
callw
callr
calll
texth
textw
veryt
knowf
ethni
reise
thepe
rares
michi
buyba
ringh
flowp
cheek
qzone
rusht
johnh
backw
backr
backh
backd
backa
windi
notec
hardc
penis
halfl
billl
chipl
donke
unitr
beast
cameo
nanos
thatp
hosth
poets
paste
thisf
thisw
cliff
fundi
slips
racec
digip
digic
ourlo
getca
meand
easyo
doorf
austr
pickf
topre
fitlo
lamar
stupi
tapet
fotoc
eco-s
stepw
alber
wildg
halla
teatr
stayf
stayb
taskp
claud
lockb
snapp
haute
outlo
deadc
updat
troll
barsh
niced
nicea
keyse
keysp
gettr
mosts
dotco
polls
cars-
edgep
carsp
fiest
silic
sexst
misso
walkf
gotst
planc
planh
artec
bulkb
thedr
longr
townc
longe
funfa
stack
pickb
namea
canst
welll
codew
copyl
lostf
losth
essay
keens
webdo
sunlo
thath
thatf
ballh
bookc
xinhu
appra
megap
yoshi
babel
sexyl
oracl
pause
netsi
netst
autok
slack
tunel
arizo
buytr
airpo
theyw
jesse
carbi
savef
getli
mostl
boath
boatm
cures
venta
feedl
feedp
fortr
renew
meeta
overr
dogca
sexli
realn
proph
intot
termi
wider
viper
sochi
dolce
mycom
mailr
fanda
hugec
brave
oneta
argen
ethio
formt
thril
pillo
thong
seese
bankw
nextd
turka
next-
netha
blame
comec
styli
fourp
faird
youdo
hacks
eddie
hackh
fulld
artin
degre
arabe
corew
fullr
fullw
dearp
jolly
deard
dearl
bumbl
feelb
angie
packb
packm
readp
grani
kolay
lastm
lastt
heatf
topca
meant
booty
blitz
playi
gofor
clare
asahi
scoop
netbi
handm
fanli
sande
wine-
roadw
time2
winem
timeh
spotp
listo
veryw
sacre
jorge
edwar
golfe
raret
admit
mindd
viewh
bigdo
likeh
eyesp
cellg
packw
dayto
appar
aquar
restr
quits
worth
coldl
casal
sahar
mandy
bigbi
bodym
bodyd
girlm
body-
letsd
letsb
letsm
letsl
discb
rehab
weart
sexse
monav
hunan
coolw
carta
hotlo
graff
sling
iraqi
sunho
kidsa
dogal
dogan
kids-
couga
tracy
teenc
westt
ginge
sidel
avalo
landr
userm
aster
caste
talkh
talke
ninel
outof
plusw
plusf
plusc
ladyr
mecha
gotom
rateh
isell
compe
bizne
somer
jobso
justk
pusht
getto
hopeb
hopef
binar
uglyp
uglyb
fitca
helpe
chapa
helpa
helpu
safe-
carle
safeg
nameg
namec
manna
highh
lined
aeros
babyd
buysp
parkr
propa
elekt
joblo
aread
areat
teamv
teamk
casca
canva
glads
prema
newsi
bitst
eastl
alllo
everg
seekm
seekp
stran
goldm
vapor
diane
ridef
hitma
game1
facea
porni
deept
madeb
afrik
bandw
singa
bandt
bandl
footc
civic
dialo
adore
darkh
bestn
bestu
casey
manyb
sellc
signw
netta
cafet
oursa
tinyt
oursh
supra
ranch
modef
samba
killw
killf
ibook
flatc
carbu
dairy
artha
musts
mustl
offsh
knowp
knowh
sonia
logan
loops
riskf
the-p
gainf
cityo
aspir
minif
minit
flowr
flowd
softi
rushf
filer
casel
backc
userp
sexin
userb
kanal
spoon
mirac
beyaz
metri
hardh
hardr
halfm
lookc
looko
jazzy
billh
billd
huaxi
cache
chiph
whore
units
webuy
onlyg
onlya
hostn
airba
sexco
jenni
jenna
gizmo
theon
kinde
sexyg
tires
sexua
goget
relat
phant
wrong
keepb
keepm
navi-
sexyk
cinet
lesbi
selft
sureh
doorb
doorp
wellw
addre
ringw
oldba
fotor
fotop
fotof
fotob
czech
mytop
sunta
exist
flood
comun
stayp
loadb
droid
itsmy
taskl
filmg
lockc
locke
lockp
flips
deadf
antim
aller
lazys
masss
lazyc
nettr
espor
westa
teenb
siten
amore
west-
weeke
thead
wantp
wantt
wante
kazaa
pollt
pinky
topta
edgew
oldna
pro-p
coden
losta
autis
shop1
tripd
walkm
shopv
corte
funlo
sensi
planp
planl
rootl
signt
armyb
zeroc
girld
vocal
bytec
gotlo
devin
racew
longa
kelle
pickw
pickn
lovev
washo
coppe
manma
newbo
codea
lostm
sidep
hella
stopm
redif
keenw
kansa
wwwde
karak
bigtr
delay
borde
sexdo
valve
discu
jiaju
veggi
submi
buyre
bistr
autog
autow
youli
butto
flatt
racin
buyth
brass
spoth
telef
saveo
seaso
smoki
artco
boato
boata
warmf
boatt
anita
bread
rockn
allsh
lvyou
ingen
ratew
mercy
hanna
vendo
impul
drawt
prone
netwa
netwe
camps
biome
getsi
getso
mailg
mailh
sexba
hotba
grape
hugep
oneto
banga
gayse
finis
bisni
forml
firer
seesa
seesh
mello
risin
bigle
analo
estat
finea
carew
pureg
hackb
fairt
nissa
salec
coreh
hotfo
catho
sexfo
takel
takeh
sierr
carca
bankn
fine-
fairm
sauna
topmo
brida
treem
redsh
delux
newne
spray
vault
liket
packl
readd
readt
netli
thewo
babie
last-
lastw
onere
hasan
tapes
rolls
fishm
sexfi
farmh
cashl
locka
poptr
lazer
treew
aeroc
blake
turna
hando
handw
menin
wineb
whyno
veryp
forev
joinc
al-sa
orlan
rareb
immo-
xinxi
willa
willc
revis
parta
parto
phili
pack1
lendl
candl
kredi
cocos
eyesa
celli
cellh
celle
softn
smack
nanny
hotsp
welco
foode
colda
becky
irans
skins
mandi
netfa
netfi
girll
tellm
aliba
mysex
viewe
datan
debts
discf
wearw
webpr
coolg
meetf
meeth
plasm
natal
carti
turis
infok
infoj
infov
yoube
voipc
netdi
herew
heref
modul
jumpd
parkm
hotwi
naugh
thinc
theac
advic
achat
dater
edito
gatew
rocky
lande
landh
balti
chatb
voodo
casti
talko
talkd
nines
nineb
drupa
medya
wallh
wallm
portw
portp
hitec
gotop
least
pains
keypa
youto
lifts
pathl
deskp
deskl
deskf
deska
deskc
flagt
rideb
yogas
newwa
hopec
helpr
queer
carli
emule
bethi
absol
onema
highr
linen
artfo
dixie
mosta
mostf
abuse
youma
betst
forwa
mazda
denim
sex-t
payle
myhom
widel
testi
proud
newsu
eastp
seekh
seekr
asses
usedo
edita
baile
sober
raref
netpr
leafc
leafl
metar
bioma
jelly
gamev
deepp
deepm
meish
bandi
ecore
curre
impre
artlo
techd
techl
verve
leadc
leadh
lowco
hairw
beers
casep
ferra
trump
truen
tunem
givel
pagew
webme
tradi
getin
cafec
esthe
manst
gemin
free2
karan
karas
onewa
tutto
knowd
knowa
knowl
knowm
vorte
festi
nepal
thepu
wager
the-g
flavo
gainb
origi
blura
airfa
aslan
paths
lexus
paten
redro
wwwbe
barlo
rushb
johnm
rainy
rainl
windb
junio
notea
darkd
hardt
buylo
shipw
billo
barne
thism
locat
comba
wordw
wordp
chipt
manys
artst
artsa
timbe
banne
formb
taoke
hidro
slave
nicks
sweat
chamb
alex-
physi
omnis
alexi
fundt
kinds
sunde
digi-
ecoca
ching
until
typed
keepw
allab
airlo
airle
thaip
airta
divad
welln
wellg
velve
wisdo
getba
outba
stepa
stepl
bobbi
ourst
camar
kissm
warmc
ehome
badge
midwe
deada
antip
wwwbo
keeph
niceg
niceh
massw
sketc
paged
nice-
merid
mapsh
clant
netth
airho
leath
theam
theal
wantf
summa
pinkb
canon
carso
costu
myfre
sexsa
sexsp
pushs
pushc
fivec
lostw
missy
missw
bottl
tripo
walkl
walki
perse
winer
thege
thegu
burge
signh
payst
oneba
denni
blueo
headw
estra
modes
bytet
bytel
bulkc
bulkf
filte
townt
loven
reddi
thatw
picke
hayal
askme
long-
jackp
copyt
sidec
keenc
thatc
derby
maker
makep
dayca
itune
askpa
letsh
commu
megaf
havet
hamme
antal
saler
rater
paidp
gotha
newro
roomw
manta
loang
loanr
betra
theys
roadl
roadc
banco
alaba
telet
payme
zheng
pedro
poppa
lesso
lessa
lessb
itemp
indic
bugsh
boatw
patro
forti
allso
griff
gravi
letme
drawd
drawb
intob
widew
newmi
warre
hillp
texti
getsp
getsm
sende
loyal
wayto
turnt
turnf
redli
poshp
hoops
mobim
mobi-
airwa
formp
cairo
fargo
showw
seesi
fifth
youlo
comeo
comef
comew
comep
careg
webbi
fourm
thesu
eurov
euror
eurog
warmw
hackt
hackw
wheat
indoc
bitte
stare
movec
arabm
corem
messa
homee
dange
takep
takeo
gunst
fulle
dears
dearm
newgr
buyan
topma
feelt
redsa
itsta
likep
livei
netle
plugp
doodl
marca
conex
debra
fishn
skull
gecko
buyco
buych
the-a
heatc
heatw
farmt
farmw
moren
washa
sexyd
booth
bootf
renov
salam
linkg
bigfi
quebe
petca
timeg
beard
listr
listc
veryh
kurum
passs
passe
passf
baser
tunec
tunea
rarec
jobba
willr
willw
partf
pack4
hothe
lende
bitsh
nihon
youpa
cocom
eyese
norte
cellr
packp
shiji
asiap
buddh
newag
fixit
baske
getbo
aydin
goldl
mytra
foodg
foodn
coldw
volks
gente
downh
ofert
bigbe
bodya
datao
thaib
wears
basis
basil
amour
meett
yourn
mayma
hongd
feetf
parkh
whatl
infoe
markg
heret
bitef
arbor
respe
kidst
hotwe
equis
viewd
boatp
arqui
thinp
enigm
fishd
payto
hippo
spani
barry
seren
rainc
adamb
kyoto
strik
prism
gotra
gotre
hizli
washi
pluss
plusa
conti
shari
morta
bleac
arian
runsh
razor
painf
meinv
jared
playn
debat
popul
deskd
newbe
jobsg
ridew
disks
often
brent
newwi
whoma
yoga-
impor
goodo
hopeh
hopea
getta
hoped
serie
pipel
pipec
thedu
flyma
youfi
solom
pyram
meteo
marsh
weber
datag
wildl
insig
mostb
scanm
funtr
babyk
catte
masti
thecu
abstr
areap
turks
denis
laugh
squid
diets
infer
diet-
myhot
worko
workc
workf
ozone
workp
netro
newsk
bitse
bitsp
nowse
lolit
eastd
criti
bidsh
verit
bigsh
prati
mixma
forsh
deepa
usedl
yeson
edits
cantr
carhi
newle
newlo
peakb
getma
ridem
ridel
ridea
rider
metad
getre
airsh
dells
delld
redmo
bando
aktiv
footi
clubj
techb
tunne
leadw
tanka
passl
postn
lohas
best4
hair-
allpr
allpa
caseb
casef
dogsa
webtu
fatih
giveb
chloe
sellr
ideah
ideaa
hedge
ablet
ablep
fails
failt
theti
luckw
luckp
cafeb
lifee
ourma
satis
seemy
seemo
bigmo
killa
flatm
flatp
flatw
felix
intro
intre
idiot
vicki
mustf
textc
warwi
motio
shore
allte
fromf
lucia
hangt
riskp
thepl
donot
the-f
allca
newdi
newdo
cityn
citye
wireb
indus
sello
airfi
viole
cheep
redre
theke
tenda
jumpt
jumph
johnr
fasto
usera
dealg
harde
halfh
halfc
lookd
lookh
looku
jobco
billm
forfi
molly
joyst
bigge
allia
combi
chipm
wordo
chipc
wordt
manyc
trued
junks
grain
fitso
web-c
unita
nanob
host4
onlyo
onlyr
minel
poetr
newmo
thisl
fundl
fundm
fundf
mikel
big-b
townh
wwwle
digid
digib
ignit
showr
bacon
asiaf
hana-
chino
whosa
trivi
pinto
typew
allan
easy2
yoush
longn
doord
doorc
hotre
susie
wellr
wellm
welle
ringd
remed
ringa
toled
carwa
matte
mattc
delig
sexwi
foton
restm
newhe
eco-b
eco-p
eco-t
annal
sunte
top10
topdi
halls
loren
hotpo
stayc
stayl
stayh
loadl
itsma
rents
taskt
catst
logon
genki
frogg
snaps
beton
quitt
mucho
conni
bebek
alley
lazyp
massl
keyso
mapst
monde
pinkm
costb
pente
gimme
fiver
fivem
lucid
andli
rossi
taxis
terre
herbo
sunpa
fucki
fuckb
hotta
gogre
sexho
plann
lanka
fitfi
patio
peppe
myart
toont
armyf
armyp
headm
wishb
affil
wwwfo
firmp
boxma
mansi
sured
komik
hot-s
thatt
airtr
picku
jetse
glitt
lawre
tomas
spine
vinta
ecomm
trafi
welli
openo
opene
jacki
lost-
saili
lostc
sideh
sidew
eurek
hellt
hellf
marri
citic
firms
krish
vote4
makei
newta
harve
sexme
allof
robos
flyli
flylo
lacar
haves
megal
haveb
havea
sexyf
taota
paulh
newre
nowpa
roomh
allmy
thien
sexfa
roadh
spotr
spotm
spota
pixie
porng
pornc
votec
lesst
icest
insti
youba
wwwhe
dumbb
eleva
breas
topra
bigpo
guili
betpa
redne
busyw
loveu
mitch
propo
intof
tough
campo
campe
parkd
jobtr
buysa
wangy
catlo
catli
snows
maild
maile
datum
hugeh
winwi
turnc
provo
cutes
galer
formc
eyelo
pills
ironm
fire-
quart
almos
deliv
zerog
mainh
maind
sexle
anyli
ladie
madho
mensh
fourh
aviat
fourd
fourf
thesk
medin
medis
itech
smell
hackl
hacka
acqua
bittr
stara
sunbo
fraud
moveh
coreg
cored
corey
messb
nanog
fence
hotfa
cathy
limos
redhe
onepo
maiso
bankd
sex-d
fairh
fulll
dearw
dearc
dearb
mathe
topme
tunin
feela
feelp
effec
hotde
likea
liven
while
readi
readr
grann
fanba
allmo
amari
finge
lasti
letus
rosen
rolli
scope
fishr
malib
eniyi
chant
hidea
mongo
dropa
dropd
bigfo
hande
pooll
ricky
winen
escro
theun
shred
passw
passb
surfi
surft
golfc
numer
minde
kingj
willd
vince
partt
runlo
sexta
qiang
wildd
beatb
editi
blogw
blogh
aiden
tehno
allba
aquam
aquab
golda
getbu
hotsh
readh
voted
voteh
joann
coldd
weekb
bidlo
downm
iran-
topch
mande
debit
jerem
tellc
beave
sunwa
discr
debt-
growp
meetl
voip-
massr
feetl
mazik
askdr
gloss
whatd
lauri
usedp
ville
herem
invis
plumb
getlo
youwe
allfa
thinf
askfi
canar
gunba
teeni
canno
cuban
fillb
crank
fanto
linco
innos
among
sunso
spank
rainr
paral
profa
motom
chatc
chata
netsp
ninep
dokto
ladyd
krypt
walll
wally
portm
madmo
noter
edger
painc
satel
viaca
jiaoy
carni
pione
thefr
merce
slipp
slipf
dunia
somel
flagr
newba
wisel
traps
diskf
yogab
goody
goodj
bigwa
volum
help-
askle
pipep
pipet
pipeb
lawsh
blizz
rewar
betha
preci
high-
holdf
higho
higha
saysa
flair
wildh
mostw
curec
curel
guzel
scana
scanc
love1
asksh
whath
jobla
aream
madam
tripe
quake
intoh
bedst
janes
terma
treeh
homei
roomc
arcti
testt
testc
workm
workw
easec
widep
alpac
demar
easto
verif
talkr
seeke
seekc
usedt
kanka
rainp
pokem
rainw
raini
penca
peakc
artba
sumit
theit
leafb
leafp
metaf
youar
listd
radic
dayda
haiti
allre
hillw
airso
catca
madec
peakw
boxer
bandr
donat
bandm
bandc
clubk
secto
maryl
techr
marys
techg
techo
barfi
bestv
stopn
wwwmo
surfb
runes
allpo
casem
casec
webtr
geek-
wwwgo
netin
waves
giveh
givef
cardd
falle
pengu
topha
failc
forlo
libya
cafes
gesti
hunth
huntw
wallb
hustl
mansh
satin
delhi
luckf
tarot
fanat
intri
curve
mymed
textd
textl
allto
acura
fromt
1-800
riskc
riska
risks
the-t
netme
daysp
xiaos
slots
tople
melis
shira
flowm
natha
theka
jumpp
lends
rusho
filea
crane
fileh
4ever
kerry
backu
wanda
deald
dealo
membe
harda
moto-
confe
termf
halfa
cotto
lawfi
shelo
alexm
manyf
manym
tellb
artsh
panti
junkf
outfa
richc
riche
layer
orchi
buyfl
minet
minep
mined
fundw
fundo
omni-
woode
digil
tired
ingra
karin
stern
netgr
typel
pimps
dumpl
websp
buyho
topfa
treer
navig
selfm
selfp
easy4
easyv
easyi
kidpa
wello
net-s
klein
bigcr
poplo
erika
bigch
oneda
ringo
assur
matts
gasma
betal
vivac
goldi
popst
addma
malta
winda
leftb
tuned
sunto
mocha
rarem
inces
hallb
topdo
melan
warme
ontos
stayt
cooks
loadm
loadf
joyma
yards
kinet
launc
logop
hidef
lockr
remar
snapc
fitmo
almac
sunra
antik
lazyw
lazyt
aside
mymin
lazyg
massh
vasta
vasts
keysa
keysc
gourm
youwa
mapse
monda
buyla
twelv
siteg
hotvi
fathe
irana
want2
viale
thean
theab
wantb
polla
carsh
carsi
wirel
inten
brite
britt
accou
tekni
wikiw
pivot
tripg
gift-
giftc
gifta
gifto
giftt
fresn
mayan
runpa
diary
topfi
shopu
twent
redfa
pullt
hunde
someh
plani
daves
paysa
vantr
proxi
headh
girla
brace
tryst
darts
arten
furni
funse
perio
bytem
secti
bulkt
allie
wholi
sammy
makel
educo
doorl
airto
thaic
salva
tomat
pinga
sante
santi
manmo
copyp
copyh
fansh
surf-
hellw
dover
shing
stoph
llama
stopg
firml
cliqu
ballw
balli
ballo
neara
teali
redbu
oddst
anytr
tenta
fitbo
blowc
blowb
scent
signi
havel
theru
sexya
sexye
wikic
topwa
topwe
topwi
biket
accel
autoz
thefu
artan
mixlo
resim
kiddo
rubbe
nativ
spoti
spotg
teler
bedro
wwwso
teleb
pornl
manic
poppe
petre
topro
coins
newfi
saras
bugst
bugsb
boatd
boxsh
blowf
feedh
feedo
rockh
espac
emplo
emple
dildo
jogos
imper
oilma
ecost
nvren
factl
busys
babyg
intos
leech
cathe
harma
warra
jeffs
perfo
hilla
sendi
stepp
lixin
forre
berli
mycoo
reser
maili
sexbu
ihear
hotbo
hotbi
manpo
huged
hugew
redlo
onetr
alina
bangb
bike-
bonda
eyeli
warpa
goose
showi
seeso
seesp
pytho
rejuv
delic
anger
kirei
allwa
allwe
netho
nethi
fanfi
fanfa
nikon
thesm
soldl
medio
medi-
canyo
euroe
uptow
euro2
salea
nudis
polan
coren
messi
tensh
icons
wordb
sasha
canba
flyfi
imedi
staym
carco
artte
webne
fineg
heroi
heros
nirva
evenb
getdo
autum
mapli
maplo
rails
novas
boile
crypt
mycas
liked
live2
plugs
caran
carab
seeth
rollm
rollb
rollt
fisho
swede
heati
heatr
farmb
funco
bootc
droph
aerob
sperm
takas
fanlo
poolc
wwwto
time-
theym
winew
theus
prono
listh
verya
surfn
surfc
hitsf
slowc
golfp
kidsi
rarel
jobbe
mindi
mindg
kingk
thana
chalk
engag
partp
partw
phils
pack3
huayu
bizde
ample
sizes
artra
klick
coco-
blogv
andra
asiac
newar
quilt
allbe
goldg
goldd
mytri
recyc
tapel
askbo
oxfor
downr
downw
downd
theni
skint
ecoba
bodyg
sunre
chest
maris
discl
discp
ledst
cambo
tudou
shama
coola
cooll
boxst
factr
assis
revel
sexpl
whatm
sexpe
ecode
netdo
outco
madtr
arman
webta
barpa
herea
hered
invit
ceram
biteb
repro
wayst
allfi
kidso
flexb
fixmy
spiel
jakes
atthe
necro
chika
squad
westi
teend
tiffa
helio
canto
dateo
sunsa
wwwar
barro
drain
wifel
gates
parab
parag
perth
raina
ellis
ellie
mappa
anyse
newpa
rockc
pickh
escal
boone
samsu
washb
moreh
plusg
plusm
lady-
desir
pinke
desim
ladyn
needh
walli
funpo
notew
runst
webal
comp-
liftb
lifth
rushs
carna
keral
sahin
mikeb
socio
cento
deskm
flagb
somec
jobsd
hateb
jobsu
diskl
disko
yogad
scrum
good2
justv
justu
viva-
maddi
ellen
stres
holyc
helpd
holys
ragna
lawst
lawsa
artfi
artfa
toppe
godst
stadt
mostm
cureb
curet
valen
scanh
babyp
lovej
ebays
asksa
areac
turkc
legit
defin
gamej
squir
sex-s
paylo
dekor
kampu
gasfi
testo
terri
lipin
gladi
andes
vatan
billw
gladt
socks
deman
kitap
misty
colum
hitca
easth
subwa
aroun
casan
evera
bidso
bidsa
strap
bayou
usedd
dongh
editl
canth
themc
topte
getmi
raing
mytax
mehme
buywi
getri
getra
facen
gameu
nowst
madea
peaka
furry
donal
curry
artli
darki
darkn
andys
leadm
bestj
textu
tankb
mybiz
best1
spira
hairy
dalia
beerb
ferro
ferry
proda
slowt
paids
hitsp
vitas
paidl
siren
kaise
needw
vita-
soont
eachs
javas
mosai
ecota
ecote
cardg
cardu
fallo
falli
falla
latef
signe
masal
jokeb
jokew
failb
eyeon
cafel
colou
paulb
shopj
hunta
madsh
seems
freej
karam
sexmo
luckl
oxyge
alain
madis
mosco
menti
crest
liter
arthu
teeny
texto
knowg
knowb
redto
avto-
loopb
loopf
loopp
topst
12345
fitwi
sextu
nanod
nanot
daysh
daysi
dayse
newda
foote
rarep
webit
flowh
dares
patha
johng
guven
fried
fanma
usert
theva
danda
psico
onesc
onese
lette
motot
jenan
topba
getgr
halfw
looke
lookr
paidt
shipi
shipc
billk
dunya
easef
barna
utopi
outdr
worda
chipf
geoma
artse
artso
rumah
theth
junkl
web-t
richs
eyepa
beats
dryst
nanom
endlo
endle
thisa
thisc
tamar
synth
atten
arama
sheri
zenit
woody
warri
digif
guoji
shika
typeh
typep
typer
keepd
keepa
buzzt
cheri
ledpa
dumpd
herho
allar
getch
cinef
selfd
selfc
selfa
selfw
vieth
viett
dogba
fastv
fasti
longo
funfi
ourch
bidca
themp
bigco
logit
dento
carwi
twofi
fotot
shjin
restl
refin
givin
istan
eco-m
stepd
vanma
stept
creep
schul
leftw
annas
lefth
sunti
asper
allgr
halli
warmd
nowfi
actor
warmo
palms
palma
baoji
recru
mache
poors
loadh
cooke
rentr
rente
gotwa
drewb
aimsh
bedli
canin
taskc
lockd
goode
egold
flipp
alman
antit
antis
proge
antil
barsa
barsi
alles
vival
lazyd
keysf
soloc
actin
actif
clanl
cland
clans
buyli
corri
westd
teena
shahr
casua
wanth
wantl
pinkh
edgeo
88888
costl
costp
solos
dicks
betma
fivep
fivel
fiveh
evolv
wikib
missg
nasca
walkp
albas
thoug
attra
tourm
gotno
tour-
anypo
anypa
tanya
nursi
buyno
dotne
pullp
octan
sensa
ihome
planw
planr
davel
dolph
menfi
signc
roott
zippy
armyc
bluej
bluev
acorn
topsu
topsp
fitpa
youhe
asklo
citiz
webfl
shant
byteh
bulka
bulkd
pagan
filth
websh
wwwdo
thatl
forza
xiong
buypl
thai-
decol
doorm
thail
theem
hayat
ecoma
ecomo
rawbo
lamin
ebest
jacke
copym
copya
fanse
lostg
ryans
losti
lostr
hellg
keenb
firmt
killd
doubt
lesli
ballt
balll
nearc
keymo
fixfi
baobe
alfar
redba
anyti
jobmo
outwi
taksi
haver
iwill
winsh
winst
sayst
netsu
fushi
mapfi
wwwst
eyefi
webbe
funwa
funwi
lunwe
jobpa
guyli
capti
getwa
gothe
toppo
polys
polyb
allme
kiddy
loanh
betri
banca
spotd
madfi
payma
tento
getla
brill
lessl
lessf
petri
audre
itemm
itemb
hanso
artca
boate
dumba
apexc
quang
badpa
mayor
feedc
jobfa
picas
merch
scanr
minut
hongs
toast
chong
busyt
busyl
twopa
drawf
intop
foros
jeffr
perfa
perfu
mamma
bitpa
wwwne
grego
sendh
sendc
sameb
samed
oddsi
werbe
retir
flybe
middl
knowi
mineb
brows
turnp
turnh
leftl
regen
trann
ascen
inspe
areaw
fireg
firek
firen
ironb
punkt
guyfi
waits
seest
pachi
dogpi
seens
seeno
burst
spore
anals
anali
nethe
estar
outca
full-
utili
twota
penfi
teras
rasta
faira
fairr
sayno
chefs
sunbe
donet
movea
saleo
araba
ledli
myboo
chaba
corps
allur
seeli
seelo
keyco
takew
yemen
arttr
pende
fullo
evenh
wacky
evenw
algar
maths
veloc
nowma
onefi
xingf
novar
polym
fossi
cashw
booze
feeli
fluff
pack2
hotdo
dogtr
packh
popco
netla
plugi
plugt
iplay
art-s
wiset
resid
bitmo
herme
onero
rolld
rolla
papar
malin
fearf
heatm
heatp
farmc
farmm
farmr
boott
bootp
bootd
manto
manth
lords
anyfi
gitar
petsp
dropm
linku
kidho
twomo
petsa
fitte
graha
handd
poolm
poold
poolw
globu
artpa
klima
panto
bitco
penho
bearr
pront
lasve
listn
zerom
qiche
passc
shbab
libre
joing
joini
hitsl
slowl
tunef
wwwtr
wwwtu
kingn
kinge
willp
partb
partm
gizli
joypa
penso
sexte
hotho
huayi
ampli
eyesl
eyesi
celln
blogz
klass
forca
aquap
webra
goldf
getbi
goldr
itsfi
restb
reste
airca
foodo
addst
shesa
casam
food-
anhui
iranb
iranp
weekw
theyo
badbo
thent
thenu
nosta
etech
girl-
tellf
tellp
seekn
mygra
mygre
folkp
letsr
disca
disct
lesco
sorts
wearl
pluto
conch
monar
webpo
erect
buyit
boysh
farse
farsi
sever
picto
overg
overe
meetc
warli
factb
vende
masse
macin
carth
whatr
sexpi
hotli
hotla
manfi
baixi
addlo
gaslo
soult
limew
flypa
sayfa
herel
hereb
nueva
olivi
gasse
fabul
vikin
podca
hungr
kidsh
sexre
viewn
nudes
onsit
useth
thinw
badli
xpres
bythe
sentr
onego
payth
xunle
tenma
icele
iceli
myrea
innot
mihan
magni
datec
sunsi
tulip
barre
balis
parac
parat
amway
motob
tatoo
knife
seeca
anysh
agele
rocka
pickm
wilde
bruta
chatf
chati
chatm
funmo
cactu
kadin
bidfi
talkn
ninew
hakan
ninec
nineh
benef
calca
trist
needp
needd
shiba
wallc
walld
wallo
funpa
portc
portt
housi
notem
bully
mayfi
painl
jobha
gouwu
youti
liftl
liftp
pathf
comps
tinho
ambit
thefe
gotpa
oneco
cakes
miker
esqui
slipc
centi
centl
centc
slim-
webga
artde
artdo
zhuan
flagl
flagp
kosov
jobsn
hater
bahar
yogam
yogap
fivet
allhi
getth
hopem
vivas
hopep
pushh
anywi
pushl
legou
elisa
safeo
safea
lawso
canpa
highm
liner
linem
lineh
lineg
curem
bijou
love0
histo
babyj
askse
buysu
buysi
betsi
betsb
betse
kuail
keylo
teamu
monos
termt
turkp
sexea
raffl
walkb
cadde
poten
viast
homej
manho
matsu
icetr
testd
worke
mirro
petst
gladf
branc
dayfi
newsv
semin
bitsb
hitch
casac
bidse
manue
bigse
deepd
naive
ourte
myste
winni
badlo
eyebo
edith
ecope
sanya
cityi
fisca
twost
artbu
leaft
leafw
biolo
riden
metab
metat
axiom
fluid
tammy
cajun
madet
madem
peakr
boxes
hotcl
vicel
alima
footy
flyca
jobth
carfi
froze
sendp
artle
darke
desca
stopp
symbi
leada
mybig
washl
hairr
hairg
wwwmi
surfe
casew
dogse
geekt
gogog
guiad
paidf
clone
slowh
yukle
naija
eachf
giveg
givea
givep
ecotu
ecoto
selln
sellg
fallw
idea-
latec
webmo
tophe
luckt
joket
ableb
tradu
cafen
huntc
shopy
ourse
todom
modet
sagli
kidca
execu
karab
packc
willl
dashi
callo
worst
wizar
artho
bulls
texte
knoww
tankf
arise
redtu
suncr
shady
barbo
herse
warst
loopc
dogwo
canca
hangu
comfy
hango
hangi
sexop
riskl
riskw
remon
exerc
rayan
gaint
gaine
gaina
gainl
condu
conde
rarew
camin
buybi
aflam
cincy
demot
minil
daref
barli
gigan
gigab
modar
rushc
compt
johnp
johnk
johne
rushl
fileo
fries
backn
usere
mirae
onesi
oneso
chose
hardi
bitla
bitlo
sino-
webwi
topbi
halfo
halfg
lookn
seeta
shipm
seete
deven
bytew
kidta
farlo
funba
hao12
elife
tarif
true-
chipd
trueg
artsp
thetu
yamah
fitst
netar
formd
fitsh
bronz
shima
proli
web-m
web-r
richb
buslo
uniti
unitl
whale
buyfa
buyfi
getag
oilst
nano-
hostv
airbr
doglo
welov
minec
trout
forde
pasti
surgi
chama
big-s
bidma
tally
theoc
blogl
sundi
modma
kindf
wwwla
woodb
catwi
hanar
steri
stere
notic
typet
buzzn
chrys
mylov
resul
surfa
self-
getcl
navid
payfi
lesbo
ownlo
niger
vietp
yousa
surfd
keywo
keywe
easyu
doorh
doort
hotro
thaif
boxpa
plate
shepa
piggy
bizar
onedo
ringr
logis
aquan
mattr
matta
matth
hotmo
betab
mitsu
fotod
shuma
eco-c
stepb
stepm
creek
leftt
sanda
""".split()
for index, name in enumerate(POPULAR_PREFIXES[5]):
    PREFIX_SCORES[name] = (8000 - index) / 9600.0
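# Six-character prefixes, again ordered from most to least popular. Unlike the
# five-character table above (whose top rank scores 8000/9600), this table is
# normalized so its first entry scores exactly 1.0, with scores decaying
# linearly by rank.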
POPULAR_PREFIXES[6] = """
studio
travel
online
global
design
search
mobile
planet
china-
health
domain
casino
iphone
organi
sports
credit
direct
google
master
virtua
doctor
energy
market
digita
portal
videos
server
taobao
social
silver
simply
office
future
vision
school
flower
invest
friend
simple
dating
projec
united
beauty
family
golden
orange
hotels
musica
animal
better
fashio
little
single
custom
busine
campus
street
greens
laptop
hostin
movies
techno
supers
career
london
chinac
xtreme
americ
electr
coffee
daniel
guitar
lawyer
double
people
realty
medica
soccer
sohbet
green-
diamon
access
secure
pocket
nature
carbon
intern
wonder
pharma
cinema
dragon
flight
chinab
natura
africa
select
networ
christ
system
greena
inside
window
escort
bright
public
comput
indian
chinas
yellow
stream
perfec
hybrid
secret
servic
liquid
photos
action
luxury
chinae
wealth
active
safety
summer
coupon
garden
colleg
living
facebo
gadget
tattoo
modern
greenb
visual
ticket
pokers
artist
source
centra
spirit
chrome
erotic
profit
tennis
senior
upload
unique
chinad
chinam
chinah
kaixin
chines
boston
purple
battle
expert
fantas
spring
hawaii
change
mister
images
freedo
financ
creati
crysta
superb
triple
greenm
review
cancer
driver
dollar
number
pretty
create
victor
trader
rental
bridge
island
chinag
talent
classi
dallas
gaming
studen
greenc
marine
impact
chinat
cruise
clicks
hentai
juegos
greent
moneyt
graphi
models
medias
mortga
rainbo
atlant
blackb
europe
matrix
super-
lyrics
amazon
youtub
extrem
smart-
traffi
robert
script
powers
mature
privat
vitami
chinal
capita
superm
center
gofind
greenl
fusion
choice
hotel-
george
radios
cybers
desert
realto
alaska
camera
world-
greenp
enviro
greene
greend
intera
inter-
bubble
beyond
church
person
austin
florid
second
denver
shadow
joseph
french
german
weddin
gamesa
nation
premie
musics
houses
budget
toyota
webcam
summit
chinaj
downlo
monkey
superc
videot
martin
connec
broker
dreams
interc
beijin
italia
chinar
chinai
torren
expres
insure
kuwait
resume
phones
greenh
pokerp
lovely
poker-
wicked
dreamb
dental
virgin
mediam
canada
gamers
herbal
screen
inspir
englis
innova
jordan
thenew
instan
arcade
missio
compan
chinay
websit
choose
galler
hiphop
muscle
worlds
aurora
flashb
forum-
spider
stella
report
pirate
fitnes
sakura
cherry
dealer
groupe
oregon
parent
articl
thebig
quicks
insura
europa
myspac
random
forest
legacy
partyp
rescue
fetish
widget
forums
theweb
phoeni
greenf
advert
succes
qualit
dream-
photob
univer
strong
cyberm
whatis
blacks
fortun
imagin
futbol
circle
trendy
income
premiu
fishin
infini
proper
stocks
answer
greenw
greeni
brazil
moneys
player
indigo
dreamt
shares
ventur
inform
russia
cultur
advanc
black-
firstb
channe
famous
target
powerp
highwa
signal
womens
stones
cheapt
chinaa
newyor
micros
superl
reklam
michae
techni
powerm
greenr
moneyb
cosmos
pornos
prospe
georgi
silent
money-
andrew
video-
pimpmy
divine
kingdo
auctio
heaven
greats
castle
radiob
thomas
tablet
chinax
videob
porno-
happyb
abouta
nevada
survey
clever
galaxy
persia
automa
taylor
celtic
empire
smarts
pictur
pacifi
todays
music-
powero
powert
greeng
greeno
chicag
printe
joomla
erotik
dailyt
hotelb
square
smallb
marina
sounds
mylife
media-
hockey
mediat
contac
homelo
gameso
valley
legend
resort
events
philly
common
blogge
supply
clearb
radioc
happyd
chinap
chinaz
manage
sample
supert
superp
johnny
export
smooth
alpine
disney
mentor
thisis
suppor
powera
androi
academ
pokerc
client
avatar
interl
flying
thinkb
hotela
harmon
exotic
rachel
earths
jewelr
outlet
medial
region
madein
brands
consul
musict
hunter
locali
gupiao
sydney
falcon
amazin
gossip
happys
chinaf
factor
twitte
amateu
latina
integr
superd
jersey
garage
worldm
softwa
cosmic
spafin
power-
object
pokerb
plasti
dreamc
inters
angeli
angela
libert
police
killer
stephe
locals
bankru
games-
photog
andrea
destin
divorc
thinka
thinks
videom
brando
stockt
carpet
massag
happym
musicb
firsta
ratemy
localb
steven
monste
metala
compar
partne
worldc
ebooks
retail
dynami
latino
supera
genius
holida
videoc
videol
rocket
leader
diablo
synerg
flash-
printa
moviet
protec
starti
magica
voteno
inteli
aboutb
diabet
taiwan
voyage
cashin
tracks
pokerd
dealsa
politi
entera
dreama
interg
promot
lovema
celebr
touris
hotelm
france
memory
marksa
switch
horizo
honest
ground
crazyc
crazyb
greatc
mediar
radiof
engine
photoc
photon
gamest
gamesb
aussie
charli
branda
solars
medico
grands
greate
habita
knight
blackc
blackm
houseo
banana
thunde
awesom
stoneb
kitche
webhos
shopsa
clears
bamboo
paradi
hidden
adult-
visita
cheapl
cheapc
china1
passio
happyt
justin
worldo
whitec
finest
linked
trucks
rapids
logica
mybest
chance
click2
lights
speedy
prints
countr
parkin
boomer
partya
metalb
bestof
magic-
happy-
powerb
points
asian-
pokerg
pokerm
mother
interm
discou
outdoo
clubma
group-
threeb
drinks
sharet
shared
dailys
genesi
workin
youyou
allsta
sugarb
brasil
superk
sunset
radiot
elemen
machin
wester
mediap
amanda
sinema
naruto
eventa
cyber-
harris
shouji
orient
remote
anime-
watchs
greatp
letsgo
energi
radioa
poster
ultras
option
lifelo
viagra
kontor
gospel
muslim
prices
pornoc
wilson
prince
luckyb
zombie
groove
chinao
chinan
chinak
comedy
sistem
hainan
worldp
sharon
korean
centro
superf
videoa
planta
afghan
realit
cleanb
female
quantu
antivi
ultima
timesa
record
villag
vector
flashp
glassb
securi
crashb
moviem
jungle
diesel
wheels
consol
powerf
arizon
cougar
firefo
obamas
solari
infoba
grupoa
nomore
entert
dreamp
interp
escape
totals
totalf
concep
banker
produc
fabric
startb
buildi
groupa
groups
dailyp
hotelr
angels
angel-
winter
lounge
freeto
farmer
puzzle
smallc
applet
mediaf
legalm
turkey
photol
traini
style-
styles
styleb
stylea
glamou
imagec
optima
broken
sunris
videog
stocki
shangh
brandm
brandb
brandt
wowgol
maggie
publis
freese
horses
israel
funnyb
musicm
browns
avenue
blackh
firstc
china0
doggie
learni
pricec
handym
dailya
hellob
livese
mighty
seller
luckya
vietna
repair
adults
metall
cheaps
cheapb
happyc
chinaw
rabbit
serial
elitem
things
flasha
xinhua
chuang
winner
kosher
videop
ashley
stocka
metrop
smartp
smartc
progra
extrab
shoplo
pokerl
shopma
offici
saigon
soundb
stupid
magicb
powerg
donkey
ginger
shorts
atelie
aboutm
tropic
bestpo
lovelo
moneya
moneyp
hearts
shopse
dreaml
dreamw
biotec
interv
cooper
totala
totalc
cowboy
builda
sticky
invent
primes
infoma
dailyb
baobao
hotell
stockb
making
distri
sugars
clover
smalls
saving
earthl
norcal
appleb
orland
recipe
mediab
legala
tradeb
newsfi
gamesp
tribal
foreve
nissan
thinkl
imageb
imageg
thinkt
deskto
myhome
interf
quickf
stateo
explor
musicd
scienc
advice
radiop
cyberp
radiol
mental
crossb
blackd
blackf
blackl
quickt
housel
drupal
copper
contra
trance
mobili
olympi
bisnis
gratis
pricel
priceb
hostma
bikini
hellos
moving
iloves
iloveb
ilovem
bridal
homete
candle
barbar
everyb
junior
growth
homesi
oracle
willia
china2
housto
bluema
boxing
myshop
ethnic
waterb
thrill
worldb
stockm
smartb
matcha
tender
discov
closet
thebes
freshb
idealt
cleans
smartt
matchm
shoppi
davids
click-
davidc
domini
clickn
clickt
sevent
edward
partys
partyc
lightf
smokin
salest
bigtit
movied
bookma
leathe
sextoy
carlos
filter
magico
magict
powerl
justma
powerh
powerc
captai
pointa
elektr
blonde
sportc
moneyc
moneyf
pokert
meetin
japan-
writer
hearto
printp
findlo
buyers
enterb
dreamm
dreamo
dreamd
specia
timeto
intert
interd
handsa
covers
shareb
luckyl
thinkf
sharem
groupb
groupt
dailyf
dailyd
hotelc
seattl
huntin
valuea
values
floral
infore
sacred
safari
profil
constr
norman
pastor
quickc
queens
sierra
genera
greatg
photo-
photom
evento
royal-
platin
royalt
charle
autolo
rapida
rights
bleach
housec
racing
underb
grandc
greatt
comuni
always
weight
goodma
blackr
houseb
bistro
teleco
firstp
linksa
bumble
first-
freepo
equity
banner
localf
locala
localw
degree
conten
mystic
moment
playlo
identi
justmy
atomic
easter
couple
sketch
lucky-
loveto
ideals
loving
transp
adultm
happyp
cheap-
hotelo
eleven
tunnel
tianya
elitec
whites
sweetc
fiesta
worldf
worlde
sexual
smartm
compra
waterm
greatd
findin
kristi
michel
codesa
apollo
truckb
footba
studyo
yourba
autism
patric
conver
cleant
gazete
clickm
clickb
click4
flashc
flasht
lightb
clubpa
activa
foruma
booksa
moviec
moviea
motion
harley
justba
origin
wherei
thermo
velvet
speedb
southb
magicl
presen
findme
saturn
greenn
obamab
moneyl
anchor
freevi
enters
interi
intere
totalp
totall
pornot
totalg
totalb
gamebu
concer
viajes
naught
journe
enigma
update
modelt
modela
clubse
lovemy
celebs
primer
educat
unders
restor
clubba
dailyh
esport
vitals
hotelt
hotelp
sportb
offers
dirtyb
advent
talkto
sensor
alpha-
theart
cheese
earthb
earthc
fundra
infose
voodoo
doktor
spacec
spacet
crazyt
greatb
grants
oliver
findan
metalo
gamesh
beaver
twiste
eventb
lovepo
teache
imagen
imagem
imagef
buddha
ladies
roboti
rising
sexsho
suprem
visitm
northb
appare
quebec
animef
trackb
formul
watcht
morgan
starts
eterna
charme
vintag
grandb
easyca
cyberc
greata
blackt
housem
quickb
cookin
firstm
firstd
fastlo
ultram
ultra-
impuls
localt
shopto
freeca
realme
pricet
pornov
themon
wheres
ringto
clearc
nordic
stylem
formal
gamewa
bigred
franch
midwes
steves
submit
magazi
curren
freshe
adultb
playma
visitr
cheapa
riverb
doodle
catalo
waterp
waters
livela
sweets
worldd
youpor
whiteb
automo
bestto
smarta
agency
bestbu
skinny
microb
freeze
violet
superg
studys
bestca
welcom
bestco
effect
bluesk
stores
beachb
worldw
idealo
freshc
fresha
metros
cardlo
sister
cleana
progre
placeb
homepa
hacker
bestre
moneym
babylo
skillb
therea
phonem
monavi
forume
forumd
movie-
strike
teamta
moviep
before
chatte
oxygen
mounta
urbani
speedm
speedc
aspire
magicm
veggie
texasc
bionic
kinder
phonep
termin
shower
notest
phoneb
season
allian
nathan
greenv
notebo
twelve
finger
medici
linkin
mexico
patent
southp
freeho
dealsb
grafik
dreame
gaysex
xiaosh
breakb
interb
forumt
teampa
schola
gameba
glitte
leadst
bestma
goodpa
papert
orchid
infoco
loveme
spectr
luckyt
childs
freewa
magnet
warren
gameco
standa
sharef
groupm
makeup
groupp
dailyc
dailyw
instal
angelo
pressa
estate
sportt
crazyf
shopho
states
valueb
sunday
nevers
team-s
corona
willow
sectio
gatewa
fishca
canvas
alphab
bestfi
applec
applea
spaceb
proxys
cyberd
corner
pressb
videor
mediaa
mediag
justsa
newsha
lincol
radion
enjoya
import
greatl
tradep
tuning
allfor
newsfo
colors
photof
photop
gamese
adulte
gamesm
transf
econom
status
driven
breeze
bottle
radio-
hotsex
lovepa
stylec
shopco
thinkp
image-
theman
backup
liveli
uptown
white-
volume
sporti
videon
rubber
walker
consum
bestin
northa
islami
animec
solarc
houser
allabo
freest
shopha
funnyt
travis
eventi
granda
render
shemal
liveco
blackg
advant
housep
houset
launch
allthe
firsts
webdev
meishi
linkst
kindle
nights
ranger
nightb
wisdom
tiffan
sector
batter
breast
provid
localg
afterb
mantra
learnb
toledo
learnm
simpli
gourme
pricem
gamema
theres
hellom
triplo
wallpa
solida
playpa
sparta
format
threes
westco
bargai
luckys
luckyc
dialog
miracl
thinki
info-s
blueba
sailin
metals
adultd
cotton
visits
smilea
visitb
psycho
cheapo
happyh
avalon
gettin
payday
engage
india-
blueco
blockb
classy
fresno
eliteb
teamma
assess
static
statio
lifewi
charit
chilli
adrian
accent
border
divers
combat
centre
centri
kiddie
trucka
superv
fotolo
studyb
commen
gameho
member
lasert
colour
rocker
vortex
blogma
freshm
freema
landwi
actual
worldl
matchb
smarto
multis
clickf
clickp
insigh
sevena
flashl
dotcom
safewa
flashs
votema
cactus
lighta
itunes
digito
cheats
booksb
berlin
movieb
movieg
carmen
foodie
climat
bestse
stillb
mediac
cartoo
absolu
skills
urbanm
equine
fuckin
magicc
rockin
intell
presto
phonea
lightc
truebi
aboutl
bestle
bestlo
filmse
hammer
obamat
lovela
grupog
pokerf
audrey
babies
boiler
faitht
landlo
heartm
presta
handba
quotes
sleepb
plasma
enterm
dreamh
smile-
trueco
bookof
pokera
markpa
filema
total-
catcha
newage
primeb
toucha
backpa
modelm
bigban
pizzap
chrono
clubso
goodfi
dealwi
spacea
luckym
freewi
freewe
dailyg
dublin
hoteld
hotele
hotelf
shopfa
hotelw
angelm
franci
lowcos
troop1
valuem
forexa
thinkc
bethel
forex-
stress
localc
freefa
gamerg
bookin
namema
stockp
alphar
shopin
brains
moscow
appled
apples
therap
voicea
infosa
linklo
radior
enjoyt
makeme
trade-
turbot
pixels
tradel
nicole
griffi
gamesl
bestha
evente
reward
justpa
around
stylef
imagel
imagea
lovebo
royalc
royalb
truckt
livele
whitea
lovefi
purepa
nameso
house-
freere
comics
monito
bankof
rapidp
gogree
syrian
energo
gamefo
finalf
gamefa
finals
photot
jessic
primea
blacka
trusta
loveca
showme
solar-
casual
animes
method
tracka
solart
goodbu
dealso
boatlo
freesa
horset
horseb
horsea
ratelo
undert
watchi
watche
watchb
autumn
brownb
thumbs
binary
lifest
speedt
cybere
giveme
cybert
greatf
radiom
banklo
lookse
oxford
really
blackp
hostle
campin
rapidr
cookie
quickd
quickm
motors
linkse
contro
justlo
freebi
nightr
maison
prosta
barter
stonec
melody
insane
locall
locale
analog
fastma
pricea
mehmet
netcom
audio-
citylo
helloa
hellop
jackso
turkiy
thered
tableb
kingsa
threet
gamewi
shopfi
shopfo
freshs
groovy
psychi
playca
cheapd
valuef
happya
happyf
apprai
button
madiso
china5
vistas
carter
rivers
compas
chinaq
largeb
whynot
danger
cheeky
richar
poetry
welove
classo
caroli
elitep
elites
python
sweetp
databa
loanlo
impres
worldg
indexa
defend
philip
waterl
localm
lunwen
thesta
truste
dalian
cardsa
bestba
bestbi
justha
microt
linkwi
handle
bluray
ideast
humans
easyle
darkma
parkwi
supern
superi
superh
eclips
teamle
shopso
incest
guilin
gameha
almost
sitesa
prepai
lauren
worldr
musicl
myfree
taotao
idealf
jeremy
freemo
worlda
pepper
jobsin
fresh-
cleanm
cleanc
smartf
shoppa
shoppe
gruppo
afrika
jenand
clickd
clickc
clickr
extrac
mailse
extras
sevens
buythe
shopla
flashm
flashd
basica
fullca
miller
backst
glasst
glassc
panama
missma
saless
salesc
salesa
joshua
forumg
printb
mercad
printi
metali
printl
forump
bookse
justde
dealfo
mazika
moviel
movier
righto
beauti
prodig
ladypa
partym
soundf
readys
hosted
hostel
showsa
wanted
startr
starta
floors
clubca
listen
fullba
albert
maximu
hongda
teamse
pointb
tradin
endles
abouts
aboutr
aboutt
labels
finewi
claudi
asianb
bestpa
greenk
oferta
nursin
gamelo
obamac
fullmo
offsho
infobu
infopa
viking
costum
southa
faiths
girlsa
hearta
clublo
spread
easyfi
dreamg
printc
eastba
totalm
easyta
promoc
easyte
softlo
lauras
startc
thinkg
starvi
primec
builde
sticks
justco
kuaile
montan
modelb
faster
touchl
primem
forthe
themes
primet
charge
hannah
shiplo
lifein
barbie
sharea
sharec
groupi
vitali
lasveg
dailym
dailyl
abstra
hotelh
hoteli
angelc
athens
recycl
dennis
speake
scoote
basket
estudi
hitech
alphac
sounda
eartha
piperl
delive
brainb
applef
applem
infoso
touchm
wooden
mediaw
pixel-
findse
enjoym
greato
enjoyb
legals
checks
august
mycool
tradem
namebu
antoni
gamesd
gamesi
usedca
eventm
adulti
finish
arctic
paperb
eureka
callsa
weekly
kerala
guide-
stylet
styleh
cascad
thinkn
imaget
loveba
youcan
lovebe
celest
lifere
airlin
easybi
brand-
rapidb
stockf
lovers
easypa
easypo
longma
norths
dotnet
animet
backto
tracke
solarp
solarm
weeken
horsel
guarda
watchc
statea
fancyb
jointo
musicw
browna
musicf
musico
charma
happyg
speeds
fireli
starte
grandm
bestfo
greath
looksa
elegan
trueli
radiod
crossf
crossc
trustl
blacki
trustb
trustc
onsite
firstf
firstt
linkso
twenty
linksb
freeba
fights
philli
nirvan
fighta
fightc
nighta
nightf
sharpe
freepa
stonea
comman
accoun
condos
finanz
samsun
aftert
learnt
please
learn2
decora
lifema
lender
handy-
walter
ladylo
pricer
televi
frozen
pickup
hostme
wikipe
helloc
pornod
pornof
salesb
cleara
cleart
facefi
turkis
timber
kingst
kingse
truelo
trendi
mirror
sprint
bestde
bearsh
though
love-s
everwa
newspa
franco
techie
whatsa
datama
mybaby
loveta
goodto
everys
adultf
transi
transa
smiles
homesa
smiley
smileb
smilec
sudoku
housin
mailli
truema
gameto
facewi
cheapm
cheaph
cheapg
cheapf
happyl
troop4
namewi
clubfi
leadfi
vertic
indiab
chinav
dentis
royals
foodsa
heatin
club-b
eagles
getfit
everlo
animat
trinit
watert
waterf
ameris
muaban
sweetb
smartd
smartl
landse
allied
landsa
homere
latera
malibu
seslic
frugal
reserv
localp
hostpa
petrol
timest
pyrami
microp
iheart
sparkl
microc
microf
safebi
autoca
infoto
cycleb
truckl
humani
reform
justfa
offerb
dataca
freela
electa
paylas
userlo
citize
foryou
archit
arabic
transl
shopbo
stealt
transo
bluese
latest
clinic
storeb
laurel
idealb
ideala
revers
ideali
brandi
idealm
youare
clean-
fastwi
freeco
freeme
boards
smartr
alpaca
stockw
escrow
davidd
places
baobei
linkbu
shopbu
xunlei
speeda
pricef
bestra
extral
silico
mmorpg
todaya
sevenb
starsa
shople
partyb
partyl
flashi
flashe
voyeur
basicb
wheela
fastfo
lightl
chrisc
salesp
salesf
digiti
forumk
forumi
forumc
forumb
printo
willsa
bookso
bookst
swingb
swinge
filesa
movieh
bestsa
bestsh
facema
fanati
thesha
thesho
raffle
marklo
eurovi
livewi
readya
readyb
dubais
filebu
soundt
cafema
heavys
allerg
speedo
selllo
cityfi
baltic
speedp
magici
texasb
sweetm
landma
postro
powere
gooddo
pagelo
powerr
purewi
phonec
teamsh
letter
truebo
tripfa
thepla
colort
westma
sedona
asians
asianp
jaguar
facelo
greenj
loansi
sellmy
obamam
obaman
citywi
fullma
money2
canyon
money4
grupoj
grupoi
akadem
justte
grupoc
moneyd
moneyi
codema
moneyr
infopr
pokerr
newsbe
pokere
pokerh
easyho
infola
infolo
millio
treesa
girlso
butter
presti
heartf
clubla
thanks
sitebu
easyfa
wirele
dreamy
remont
breakf
thecha
intero
24hour
promol
collec
myreal
loanfi
rounda
bestmo
timesh
markma
covert
freein
sticka
dropse
paperg
justdi
scoreb
modele
clubsa
goodfa
childr
thefin
primep
polari
fotogr
keysto
luckyd
cloudb
netpro
drinkm
gameca
saglik
pureco
billpa
easyto
hotelk
sportd
sporta
realte
realta
nguyen
libera
ladysh
dayday
nicefi
nascar
exerci
cardio
cardin
hyper-
dealto
thepro
blogtr
heroes
innerb
offera
dirtyt
freefi
goodsa
goodso
homema
petite
gamere
astral
smalla
thequi
alphas
calvar
soundc
redsta
eartht
saleso
stylis
eartho
earthm
mining
brainy
easyro
braind
karate
futura
sophia
sugarc
supero
quickw
grandh
touchs
hungry
cyberg
spacef
radiow
spacew
crazyd
blinds
realpa
mediad
namelo
livesa
newsho
legalb
playha
pixela
opente
plants
traded
easysh
facebu
techto
caring
kingwi
hardlo
trueha
photok
photor
argent
transm
gamesg
gamesn
middle
bodyfa
assist
lookma
newsta
newstr
drivea
trainb
radica
guideb
founda
flavor
guides
cityof
teachm
guide2
thinko
scorel
overlo
hustle
freede
bazaar
trans-
jobspa
painta
visith
lastlo
sitefi
whitef
logics
gamepa
logicb
gravit
gamepi
wholes
reflex
stopth
native
manuel
videow
bankon
videoz
pengui
videoo
liveba
finall
cityma
stockd
urbanc
themad
urbanf
backfi
follow
jessie
playst
thiswi
viewlo
billio
westfi
projek
candid
northg
helpma
cellfi
newcar
strate
facepa
moreca
solare
brutal
firebi
dealsi
dealsh
dealsl
bodylo
kankan
famili
bankpa
mailma
mobima
quoteb
horsem
underp
guardi
watchm
undera
watchf
fancyf
fancyc
funnyc
autosp
funnya
autost
musicc
musich
whitep
grandp
rightb
bigfat
grandf
innovi
cyberl
cyberw
greati
greatm
bestfa
jacobs
daycar
cashca
jamesp
buscar
crazya
newsro
texasd
reallo
housed
clique
quicki
quickl
callca
olimpi
firstr
lovedi
hostbi
excell
teamfi
freebo
utopia
orderf
teense
wildwi
traile
nightm
tripma
justli
pionee
giving
teamha
pornst
finans
toront
perfum
newsca
storea
guidet
learne
bigsta
tacoma
bronze
sexcha
talklo
bluewi
freecr
steelc
agents
goodlo
priced
hao123
pricep
plantr
marcha
cityli
handyb
loveso
plante
lovesa
hellon
pornop
helloh
pornob
findwi
healin
ultrab
sharps
sillyb
linkto
clearm
urbanp
clearp
obscur
iloveg
missfi
tablef
stylel
telefo
vendor
homeho
heathe
bounce
sport-
trends
bearsa
affili
chocol
proyec
bluepa
vitala
realre
materi
memori
foodlo
sportr
lesson
madrid
debbie
cafede
bluebe
everyd
everya
yourmo
adultp
metalc
visito
lovefa
deallo
fisher
gameta
surema
landpo
wizard
cheape
bestwi
tokyo-
rivera
autopa
autopi
openma
club-s
planma
purema
hippie
homese
studya
galeri
soultr
papara
filefi
bluemo
alchem
blocks
watera
classb
proces
classt
lingua
livelo
mindlo
thecar
truewi
buysel
elite-
dealma
tranny
softle
indext
blogbu
spanis
bankha
shortb
suzuki
homedi
corefi
stocko
floria
cardse
redhot
cardso
kingma
stereo
milano
webpro
capito
scotts
purelo
tripse
debate
bicycl
sparks
shanxi
microd
honda-
cyclem
blueno
lifetr
ideaso
lifeto
humanb
linux-
hotmai
justfi
altera
safelo
cellma
teamlo
freelo
nickel
astros
astrol
gamehe
shopwi
dolphi
kingpa
xpress
multid
petcar
kingle
novelt
eurota
weblog
bluesa
misslo
clocks
clubwi
clubwa
lovewi
since1
blogme
speedl
safesa
idealc
freshl
brandl
freemi
locate
codere
limewi
cleani
splash
purepo
smarty
smarte
spiral
peters
davidr
davidh
easyma
granny
easymo
davidb
drinkf
multit
domino
weston
circui
deathc
multim
clicki
clicka
extran
extrap
""".split()
for index, name in enumerate(POPULAR_PREFIXES[6]):
    PREFIX_SCORES[name] = (2927 - index) / 2927.0
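# Suffix tables mirror the prefix tables above: one table per suffix length,
# entries ordered from most to least popular, each followed by a loop that
# assigns a linearly decaying score by rank. Note that the two-character
# suffix scores below top out at 1311/3933 (about 0.33), so even very common
# short suffixes carry less weight than common prefixes.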
POPULAR_SUFFIXES = {}
SUFFIX_SCORES = {}
POPULAR_SUFFIXES[2] = """
er
es
an
ng
st
ne
et
re
ll
on
ar
ts
le
en
in
te
it
ay
nd
me
ia
ce
ed
at
ck
al
ds
se
rt
ch
op
ve
rs
us
as
de
ns
ow
el
nt
ss
ot
or
ad
am
ic
os
sh
ge
ee
is
il
up
to
ls
ks
na
ro
om
id
ty
ys
ld
ly
og
ex
ry
nk
ix
sa
ta
ra
rd
ir
ip
ms
ie
od
fe
ps
th
ap
la
co
ou
rk
ey
ol
ft
ke
go
nc
ma
tv
eb
ut
um
ox
ok
ea
oo
aw
dy
bs
io
da
ub
gs
ws
rm
ur
no
do
88
oy
pe
im
em
ue
ew
ai
be
ax
ao
cs
00
ka
li
ct
ak
ca
ag
uy
ri
wn
ni
ya
eo
ob
ny
ec
ki
lk
ab
sk
rn
ff
ik
he
by
ht
pa
hi
sy
ep
my
ga
fo
lo
ti
mo
ac
ba
mp
ha
gn
so
rl
ky
rp
si
ig
va
mi
99
ei
cn
ze
ug
ah
ye
ez
lt
ru
lf
4u
di
xx
ho
11
uk
24
ul
ko
lm
68
bo
08
du
az
ux
py
pi
xt
gy
23
lc
tt
66
ci
nn
10
za
of
pc
cy
20
18
oc
21
lp
ku
65
01
ua
aa
po
sc
tz
ui
dd
cc
zz
sm
mm
ji
ov
bi
pt
rc
ev
lu
wo
eg
ud
vi
sf
we
hy
wa
oe
ii
dc
ae
xy
zy
tu
au
60
dz
77
ja
tr
sp
14
td
yu
su
tc
gh
ef
zi
af
pp
bc
fa
gi
xi
nz
hs
gg
fm
hu
yy
55
rg
yo
98
pr
58
oh
69
oa
if
rx
89
oi
12
vo
fx
ib
uo
tl
av
oz
nu
09
mb
fi
vd
56
bb
33
rr
sw
fc
86
78
vy
mc
cu
jo
80
rz
yi
yc
eh
yz
07
15
fy
bu
wl
ww
-s
sl
dj
zx
fs
sd
yn
28
bg
eu
iu
md
rv
lz
xs
ph
cd
19
3d
rf
jp
22
yx
aq
63
p3
2u
jx
xe
hq
hd
fu
db
je
sj
tx
kj
vn
uz
ym
hr
iv
uu
aj
yw
50
zs
16
nx
nh
kt
jy
hk
mx
13
bd
dr
bt
38
iq
cw
wy
rb
sz
gp
ju
mt
47
30
pu
45
51
qq
vs
-x
zu
yl
pm
04
25
pl
35
iy
dl
fl
jj
gz
06
pk
js
17
uc
cp
dm
kk
wi
02
wu
sb
26
cm
05
-e
34
yt
e1
yd
hl
gm
tg
mg
tm
xl
ml
cr
sg
03
jd
hp
zl
tw
91
dh
cg
fw
dt
bz
br
sn
xp
-i
yr
kz
hc
kc
zw
mz
gw
-m
76
44
73
gc
vu
27
dn
wz
gt
pg
tp
-c
jc
cl
90
mn
xo
eq
dx
jw
67
bl
kr
nj
mw
79
qi
52
bj
xa
vc
-a
-t
lw
83
tn
31
cq
px
nb
-1
96
lr
yj
85
71
df
cz
hg
s1
sr
81
wx
wg
nv
fg
qu
nf
48
gd
tb
mr
70
sx
39
cf
bh
yp
pw
40
fr
87
yk
dv
2b
nw
a1
gr
zj
lb
hw
gl
32
ej
e2
cb
hm
-p
29
lv
pn
-g
pd
-o
82
lg
95
yh
xc
jz
36
kd
o1
37
dw
-n
uv
uh
s2
wd
n1
zm
zh
dg
dp
xh
ih
tk
wm
61
hh
fz
57
qa
pz
e8
jr
75
dq
jk
hx
yb
yg
uf
vr
hb
-b
zc
t1
mf
kw
qc
bk
-u
jt
cv
bx
n8
tf
nl
-r
i8
sq
a2
bw
ln
lx
rq
nr
zg
rj
qs
wh
jh
97
vv
59
bq
-h
wj
wc
54
93
53
gj
yf
qw
nm
zt
3g
o8
oj
xz
fd
rh
xj
vb
mk
qo
64
u8
gx
92
49
zb
lh
rw
r1
bm
xu
oq
vp
dk
wk
-f
qe
ql
cj
jg
hj
gf
gb
kb
62
-l
vt
lj
uw
zp
km
hz
4h
qy
wp
pv
n2
zq
kx
mv
43
mj
qd
jm
pb
tj
zd
iw
kh
yq
cx
e3
42
jl
o2
a3
zf
wv
72
kf
mh
fp
r2
xm
vz
84
e5
wb
ij
jn
np
zn
xf
41
-2
zk
nq
fj
46
wt
bf
94
xd
d1
bn
-7
-z
bp
vm
fb
fh
-j
vx
74
qm
g1
kl
uq
g8
kg
qx
t2
vw
fk
2c
gk
y8
yv
qj
hf
sv
l2
wq
xn
q8
-v
r8
x4
bv
u2
gv
s8
r3
lq
-y
a7
a8
m1
2k
k8
qv
i2
uj
tq
fn
e4
x7
qt
i1
jq
n5
f1
wr
2o
vl
-3
kp
qf
jf
t8
t3
2s
l1
xv
qp
jb
o3
n3
m2
d2
d3
0s
s7
e7
xq
l8
xk
vq
s4
qz
vf
y1
xr
i3
fv
e6
qb
pj
g2
xb
qh
vg
a4
-8
fq
qr
wf
qn
c1
o5
zr
3r
mq
y2
p1
o7
n4
-5
4f
p4
vk
i5
h1
gq
-0
m8
s5
2p
x1
k2
qk
i7
n7
m3
r7
jv
vj
v8
zv
0k
p2
3a
r4
5u
hv
e9
x8
d8
-4
qg
0x
vh
n9
u1
k1
o9
o4
n6
o6
a9
t7
t5
2z
l4
r5
c2
l3
1k
b1
pq
h2
3w
3c
s9
y7
k3
k9
d4
h8
3x
v1
b2
-9
3k
4s
w1
w8
p8
f8
y3
4g
1a
d5
c3
v3
u6
t4
-q
r9
g5
g3
t9
3s
y4
0m
c8
i9
-6
y6
1s
1c
0w
0t
3n
y5
2m
x3
p7
v2
b3
a6
4d
7a
w2
0a
0n
p5
5k
b4
h3
3m
3e
z1
2x
g7
8s
l7
d9
0u
6w
9x
u3
g4
z8
y9
l9
k7
2a
0r
c5
c4
i4
b6
u5
u7
s6
8k
r6
4a
0c
0d
a0
4m
9a
8r
f5
l5
6g
p6
5a
2d
h7
4e
t0
3t
3h
3b
m5
f2
1x
1z
v4
g6
t6
l6
2i
8x
r0
7y
6d
v5
o0
4x
3z
9n
8w
m4
m6
8e
d6
d7
0p
0z
0h
i6
5s
m7
m9
s0
8g
1w
1n
7b
k4
0b
0e
6m
5x
5z
5e
4c
4k
1e
x9
7c
k6
0i
0y
v6
2f
9k
9m
9s
8t
7t
w3
0o
c7
5g
h9
h4
4y
3q
z2
9c
m0
f7
8a
e0
1u
1f
x5
7e
7x
6c
6t
p0
p9
b8
2e
u9
6r
n0
6y
3l
9i
9e
8u
f3
2t
8c
8d
8z
1y
1d
1m
0g
6f
c9
5t
5m
5i
b7
4b
4i
3y
9f
9y
8b
1p
1t
1o
x6
7k
7s
j8
0q
6n
6u
6v
b5
h5
3i
9b
f4
y0
6x
l0
1g
7f
7h
7u
k5
0l
6b
6e
c0
c6
5w
5p
5l
5h
5b
4t
4p
g9
3f
z5
z3
9d
9w
2l
8i
8m
1h
7i
7w
q1
q3
6i
i0
5d
5c
b9
h0
h6
4l
4j
4w
3p
9u
f9
2q
2n
8o
6s
1b
7n
7m
7v
7z
6z
j1
w7
0f
6k
6l
5q
4r
g0
8y
z6
9g
9p
f6
2v
2h
x0
7g
7d
7r
q9
w9
0j
6h
6o
4z
9l
9v
4n
2w
8h
8l
1v
1i
1j
q2
j3
0v
6j
6p
5y
b0
9r
u4
u0
z0
8q
2r
8n
8f
7j
q5
j5
w5
5v
5j
5f
2g
3j
9h
8p
8v
1r
7q
7p
d0
j6
w6
w4
6q
v9
5r
5n
9t
3u
9j
9o
9z
9q
8j
1q
k0
q0
q7
j7
j0
5o
2y
2j
3v
z7
j4
j2
z9
z4
f0
1l
7o
7l
v7
4o
j9
w0
v0
4q
4v
q4
q6
""".split()
for index, name in enumerate(POPULAR_SUFFIXES[2]):
    SUFFIX_SCORES[name] = (1311 - index) / 3933.0
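# A minimal usage sketch, and an assumption rather than part of this module's
# original API: given a domain label, look up the best-scoring prefix and
# suffix of any tabulated length. The real consumer of PREFIX_SCORES and
# SUFFIX_SCORES presumably lives elsewhere in this file; score_label below is
# a hypothetical helper added only to illustrate how the tables are keyed.
def score_label(label):
    """Return (best_prefix_score, best_suffix_score) for ``label``."""
    best_prefix = best_suffix = 0.0
    # Try affix lengths from longest (6) down to shortest (2); missing keys
    # simply contribute 0.0, so lengths absent from the tables are harmless.
    for size in range(min(6, len(label)), 1, -1):
        best_prefix = max(best_prefix, PREFIX_SCORES.get(label[:size], 0.0))
        best_suffix = max(best_suffix, SUFFIX_SCORES.get(label[-size:], 0.0))
    return best_prefix, best_suffix
# Example: score_label("travelzone") picks up the six-character prefix
# "travel" and the two-character suffix "ne" from the tables in this module.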
POPULAR_SUFFIXES[3] = """
ing
ine
one
all
net
art
and
est
ost
ers
ter
ate
ell
ite
ace
hop
ore
are
web
ive
age
dia
man
log
inc
ack
ice
ion
ard
ear
ill
air
ide
ome
ain
ail
ang
ent
ame
ech
ess
mes
les
now
oup
ast
ook
ind
ife
tes
ver
ews
oft
you
ong
day
law
own
ark
ove
ies
ock
ity
lub
ile
ood
its
box
lan
way
der
ree
ink
ash
ire
ist
ans
ort
ets
ase
hat
car
ker
eam
ure
our
ass
ars
ina
nes
ank
sex
use
ool
ale
usa
int
ead
old
top
ish
ick
ade
com
ght
son
eal
ign
rts
nfo
bar
sic
ime
nce
und
afe
dio
tel
ays
alk
cks
een
tal
arm
ise
end
buy
job
ode
eed
ave
nds
men
out
ids
ads
can
tor
oto
eat
ere
low
tch
how
ote
ake
pro
rld
ian
lus
ble
ata
res
red
lay
ler
ube
boy
ons
ins
ner
key
lls
ics
oom
obs
per
pay
fan
aid
ork
oad
pot
rds
dog
orp
win
ero
eet
rum
oot
als
her
nts
act
888
ats
ana
pen
fit
map
yes
tar
led
set
oon
ton
hot
mix
cat
irl
ant
ten
ean
lue
iew
fly
lot
oll
bit
han
ama
ran
uck
rea
ike
ext
ots
ect
bet
els
123
vel
let
ica
ild
ord
ara
ope
ips
off
ens
ney
ole
lab
wer
gas
guy
pop
oor
oil
eng
joy
bid
des
bus
hip
ift
new
ach
orn
oan
rks
rch
yle
fix
hit
dge
ops
ges
eye
ser
000
ush
tec
ger
ino
ath
nia
oss
tax
elp
ady
lla
rip
365
ane
ngs
uto
lar
tos
llc
ose
get
uan
max
tic
tea
ust
see
ery
unt
sky
row
mag
deo
orm
ask
ilm
any
ods
ors
ily
dit
ndo
nks
ogs
ape
eep
sat
ren
oks
eel
ses
tin
ada
edu
too
war
eak
tem
ros
sit
ert
ven
kid
era
med
den
gic
ull
ted
asy
ung
ber
sts
100
bug
ron
oys
for
his
008
con
eds
urs
lin
ria
rus
mer
168
ket
ani
ump
ves
ith
oop
try
rap
van
ple
ize
tan
san
isk
cam
lle
ody
aby
iki
sia
ees
nic
ory
cup
ier
999
vip
bay
ari
rop
ris
shi
olf
cut
rth
gle
dea
ala
cal
oke
nit
une
two
eck
pan
sys
yer
bed
ems
abs
las
uch
hen
spa
bal
eys
oes
nch
ene
ams
ley
eme
rce
eer
tas
sta
sis
ita
uth
rty
iao
ols
ary
ola
nge
ali
ltd
ngo
oat
bbs
ces
iss
uit
114
nda
kes
nto
eas
che
lip
ida
tie
lia
big
ano
ban
six
unk
eco
rry
uts
ico
mod
chi
lon
vie
kin
ogo
fts
ras
uff
360
met
lly
gen
raw
dvd
aps
via
ray
pia
ini
fer
ddy
tra
aya
amp
hem
ral
gns
oms
nly
sen
ial
bao
pic
rew
the
not
cle
min
ipe
ago
lex
ken
ous
ora
xxx
tex
mad
yte
ena
cts
ult
tis
iet
ows
eon
ado
pet
seo
lee
rse
sin
say
que
uys
ags
ype
yan
dex
eit
ram
eld
rls
kit
cer
lat
ami
ule
del
ova
-it
aim
ios
igh
ein
kan
ium
rms
arn
ism
hub
lis
bot
dev
lam
mas
ich
ito
ste
hin
hai
urn
opy
nor
ego
esh
wan
nal
add
dos
dan
sum
esk
ude
ika
gal
elf
won
rom
ndy
asa
irm
ugs
sed
ncy
uru
666
tus
sol
ona
ura
mon
gan
tep
ums
mp3
das
ima
him
eta
ute
nse
pal
ela
rex
tle
lms
isa
bag
nis
dom
oga
who
lth
tto
eli
rte
ogy
nas
rid
ils
pin
rin
rgy
may
jia
ont
lie
aws
tro
wap
mar
ski
eri
001
die
101
sms
lor
nny
bux
pix
rst
don
iya
520
dds
mit
ppy
inn
bad
pes
cha
lik
uty
mos
xin
nie
rat
rmy
wow
oma
gel
lty
fox
ele
etv
bel
llo
olo
iva
hes
eru
ond
nte
oin
avi
ias
rge
lit
ato
411
777
ati
lag
009
dor
ori
eno
azy
eaf
mac
erm
oth
rve
ila
isc
gap
imo
she
hua
tay
tre
nik
rit
ira
oda
ese
sco
lic
hao
zen
nos
007
far
tte
nix
din
udy
edo
mps
cia
xes
nus
lak
api
len
ema
lix
rue
uri
omm
lim
dry
ups
eup
oni
sas
nta
uro
gar
rix
eto
lug
nel
gin
ply
eks
aze
bes
ann
aza
rno
doc
pad
ked
rim
eos
ury
eus
ham
ete
omo
ula
aft
gon
urk
toy
ava
rez
nga
uns
fed
tik
rup
dir
iro
cap
oco
aba
sus
pas
elt
sha
rma
ien
ait
los
esi
tai
911
aco
pod
osa
emo
-tv
una
aka
ius
gay
ric
oro
tat
saw
leg
non
asi
nyc
lks
tur
tom
hut
aro
atv
sam
pub
nan
dot
ied
app
tit
hui
rio
ili
stv
gos
uzz
tix
oli
erv
mat
mir
rod
emy
bee
bin
pon
lux
dis
cus
god
fee
aga
oof
gou
sea
ido
mom
s4u
ony
ues
ely
mex
urt
irt
010
sad
bio
aff
sim
urf
tup
cos
rie
vis
nna
tty
eap
org
aku
ntl
nco
hon
gue
ret
esa
zer
sos
mic
tag
ern
idc
isi
adi
aki
688
dle
olk
odo
mei
xpo
dns
sme
uel
put
eda
has
ono
exy
ett
cas
awa
aus
247
rad
ebs
ual
ims
tee
ska
egs
sto
nam
ecs
163
dam
jet
eft
vet
axi
url
ded
pit
gov
har
obe
ird
itz
jam
nka
eis
ned
-co
pps
alo
hou
sor
ega
oku
oxy
ece
oud
ndi
nde
erz
120
abc
cor
gps
ntv
oid
dar
rab
igo
-uk
zon
555
lad
sil
uae
ota
nya
osh
cpa
uge
mma
rep
imi
www
mor
git
sup
mba
tti
yet
188
pos
why
ebe
sel
omp
abe
ubs
uke
ibe
tip
oul
jie
boo
elo
iti
cky
mai
obi
kon
uma
zoo
ege
kar
rer
eve
aks
obo
2go
800
eni
apa
aca
vil
sec
nat
ssa
oso
jin
nti
dtv
uni
zza
eps
wen
err
zar
eth
yon
bia
ssy
ojo
ety
sty
cho
mia
iga
tao
kai
rol
arp
erd
doo
gly
ept
yme
lli
owe
iba
tam
odd
cms
yin
uba
cry
eva
lax
cai
eso
bum
rra
lve
hoo
oba
hts
dus
tta
wei
dao
shu
rme
hic
att
nin
nen
dee
aru
vod
-jp
4me
nex
eur
sse
111
abi
rgo
tme
tim
ief
fin
bai
dat
aly
678
acy
srl
kat
pat
oem
588
udo
ici
sch
315
ctv
was
ifi
ben
oip
rak
119
val
emi
awn
988
kie
bon
oos
bul
erg
rax
dal
goo
rey
ify
ott
did
aha
oya
edi
owl
vas
hed
aat
bra
lah
idz
idi
but
gym
dad
pie
aph
vin
518
tum
arc
fry
pus
lib
eca
kup
got
rah
110
uce
nar
mmo
uss
ict
nne
igs
due
aar
ais
ume
abo
hus
irs
omi
ois
ful
tia
ror
sar
aan
eka
mal
ocs
vid
cel
pac
zle
rel
sks
uad
isp
789
lds
gia
oet
lum
mob
oca
sou
lio
dic
lse
dem
ped
chs
sso
alt
sik
eby
had
amo
lem
ilk
e4u
diy
ooo
mus
rog
hid
avy
bby
668
yat
bil
ilt
dco
iri
bas
dai
-cn
fax
118
-up
azz
lid
bat
usy
anz
por
ige
iel
rde
otv
ged
lts
olt
lec
aos
eem
mel
kim
iko
321
rco
enu
lco
rec
pig
alm
tap
333
lai
epi
mis
utt
iso
rox
rtv
hoe
xia
uku
eti
etc
gem
izz
sal
cad
asp
enz
kos
xas
cio
wed
nee
ulk
pai
oyu
ede
acs
zik
umb
roo
500
evi
leo
eko
yen
lsa
orx
oru
mmy
cow
rac
-me
owa
fic
aty
umi
mls
nap
dou
tsu
rns
pol
yne
arz
atz
ved
bob
uda
til
aci
ilo
dol
omb
uno
kor
oit
uji
pid
aja
hor
oly
ija
nky
mtv
e24
ifa
lys
alf
hia
eau
iji
kom
nsa
mil
kus
dup
dna
pex
sem
ypt
eez
oka
oko
qua
ior
nut
ebt
818
eze
b2b
sgo
amy
loo
tts
ulu
oal
roc
roy
lta
sai
cro
spy
kas
xer
uta
par
521
tgo
vac
txt
kle
exe
zip
-bg
neo
gie
iam
anh
-us
sma
vey
itv
uer
inx
tha
cao
woo
nux
hos
ttv
hey
itt
iff
004
lup
kis
-ro
rem
faq
rta
edy
tco
irc
234
jan
nty
loc
sti
yam
cht
ths
sur
012
xis
hal
mbo
omy
xel
inh
glu
nko
006
200
gee
uds
nli
dya
acc
uga
vox
ceo
kam
kay
ovo
opa
pak
bud
eny
tox
fei
cop
lou
nca
lut
aul
bos
-pc
imp
udi
rre
evo
bro
yit
dme
bor
pam
pec
369
tab
etz
rne
wit
bei
bol
mme
rto
rot
sie
azi
158
uki
sac
zes
ibi
ytv
iye
yao
400
och
ivo
eil
bre
aia
nme
kal
dig
ksa
998
axx
cin
lea
erk
cco
mak
eya
reg
tou
fam
arr
uka
agi
yre
thy
epa
zan
rug
oki
fen
nji
rag
nup
fat
izi
tac
joe
xie
sca
iny
gua
nox
lop
pup
wis
nst
sip
ltv
hoa
300
oho
vic
crm
epo
iku
cum
pel
580
rca
raf
tif
alu
rka
eba
mez
hef
nai
xus
doe
mao
koo
oty
sey
osi
hee
cil
zin
ako
col
ibo
mee
222
eji
tah
ewe
oji
xed
wel
cab
cca
mpa
rss
tys
pcs
dgo
zhi
lyn
gis
dre
soo
aaa
aas
tri
odi
eof
zed
hio
mlm
lei
ald
nim
rna
gus
kee
xue
gam
nha
tio
huo
igi
oyz
var
fia
atu
178
sap
scs
s24
yoo
egg
dro
788
reo
yst
suk
ync
gro
sof
onn
erx
rly
sns
kel
dra
ygo
005
kia
geo
abu
iks
gao
bis
cis
luk
mam
oir
eum
mie
hil
duo
173
tof
jar
nth
aso
nci
020
24h
nso
chy
tol
tka
sue
iza
rtz
arl
tgp
rik
tad
ebo
sss
lez
lac
tet
nio
lok
asu
idu
rsa
oti
nyi
sby
bie
chu
num
cnc
nza
ipt
onk
ccc
yal
nja
icy
anc
pts
rib
tak
fil
ugh
cka
aii
thi
eke
lme
eef
nea
nez
dow
808
ldo
iii
enc
raz
yus
igg
oyo
grp
few
hoy
ael
lap
epc
nov
gsm
eee
bey
opt
lya
raj
nno
1st
fab
-in
xit
sci
t4u
ncn
nak
bak
lso
dec
oge
cox
rof
dui
ril
zzz
fes
dly
hab
hay
onz
gda
cim
456
php
ofa
rbo
opo
uli
mov
rar
345
gol
kyo
rys
ssi
hix
gra
sio
euk
gli
600
urg
-tr
rae
oce
ynn
lto
owo
iry
lry
dby
ptc
coo
uxi
nav
yco
yas
oob
zhu
138
a24
fon
vit
eux
inz
ktv
002
eci
sho
uti
det
sya
tso
oha
gma
gor
tok
126
uya
rti
hie
ybe
rps
176
soc
maz
rby
nki
nba
4us
ifu
sli
ulo
rev
ogi
iot
oap
nue
oby
tcs
zzi
wns
rro
jor
nol
lps
fos
som
exo
rda
pla
gui
guo
eid
tdo
lox
asm
886
naz
lka
tby
erc
mms
rei
adv
yue
iaz
sub
sir
hme
sko
ivi
het
pda
erb
tez
wat
ibs
stu
ovi
bow
moo
ggy
tsa
-ad
tir
dys
ndz
anu
hel
mni
vor
rdo
zil
zee
sre
ffe
ced
cet
lig
liu
utu
nad
989
mco
dim
dix
uls
uld
ggs
868
ref
nhe
nho
rok
yup
hka
ydo
smo
itu
zia
n24
tho
yip
cci
aud
aux
rya
kov
apy
899
jon
xam
von
cik
uca
cot
jay
uxe
mpo
wii
isy
189
567
pil
oam
ocn
tiv
ppe
rou
t24
ixx
olu
tvs
llz
zel
umo
gum
aer
eia
086
nah
ecn
luv
yar
aye
nyu
900
bic
zhe
pis
raq
epe
pta
rke
ndu
acn
utz
leb
lgo
lma
ymy
rui
eah
aut
dka
pao
tmy
566
ppa
iac
izm
bbq
mfg
vee
918
uin
sei
jax
eor
lal
ntu
wal
wag
lom
700
meo
169
sle
bok
uly
pee
4x4
aji
icn
gio
iar
tei
eff
rig
898
apt
hom
nzi
-sh
bly
eza
scn
xon
exx
cre
cme
nke
ilf
eki
nom
vue
gby
pur
egy
cen
ncs
str
889
idy
fdc
nax
bam
tno
kme
lde
mum
uby
gil
rob
poo
ptv
sul
oyd
zzy
uct
ipo
sak
ugo
cko
phy
rso
ruz
128
dox
ayi
pez
sdo
ubo
enn
ahs
hum
hup
teo
jes
efi
oer
-sa
emp
ipa
858
kly
aad
exi
lew
inv
spi
mbh
yna
mec
afm
qui
oak
oci
ahi
iat
pto
alz
618
yla
usi
vat
mbs
onu
198
uco
pha
lau
obb
uze
ruk
vds
ayo
ayu
kou
cue
img
iad
yno
iem
rpg
cuk
fis
uen
mtg
zai
psy
nma
tbe
otu
aum
nem
yed
opi
ewa
esy
kut
gtv
nle
arb
ng8
hol
smy
wto
zou
uia
uip
uis
aal
cie
wes
erp
hra
nil
nig
plc
gul
amm
lol
dto
880
oxx
nac
oze
shy
yak
866
rai
-on
iah
iak
sud
apc
uso
hof
haw
nxi
osy
bli
axy
ddo
ftp
fty
nry
amz
cta
-fr
rbs
gne
usu
yto
iyu
bec
bex
ngz
lby
esu
gig
121
pre
ehe
dmy
sig
ebb
xtv
fur
iru
ipp
blu
sao
ezi
exp
cit
cki
eoh
ekt
noo
ffs
yce
ngi
upa
bah
kka
orb
iwa
aje
sbo
ogg
adz
efm
ixi
yby
avo
leu
hah
hak
zor
rov
uet
zim
cke
y4u
etu
558
lao
no1
psp
iyi
nay
cds
awl
ngg
kei
108
tuk
tub
mol
180
afa
gcn
uid
rcw
ubi
toh
mud
366
efa
apo
133
tcn
r4u
oja
tly
zit
-ya
inu
e88
uve
nei
nec
166
sly
sla
yee
bom
mid
vpn
gri
apr
fel
oux
xiu
osu
a4u
314
fzl
lev
jas
btv
8sf
coc
nir
eim
lob
ozi
awk
bcn
isu
bau
baz
bab
kip
rub
mae
mah
paz
dep
gaz
mio
ilu
nju
vps
vvy
roi
fcu
nbe
efe
iez
rif
osf
yde
ipi
l4u
aam
evy
etr
inf
glo
laa
lca
foo
zam
lov
cem
n4u
opp
opr
bod
ooz
fus
afx
ysa
adu
dcs
css
eha
lva
fid
dsl
atr
r24
hco
uko
axe
199
seg
vos
kno
leh
tla
rba
-do
gna
nip
cak
lmy
rli
eif
psi
yor
kao
e-s
awi
luz
upe
mea
388
088
sno
2me
mot
dez
186
dah
esp
tml
rcs
tot
raa
tid
kys
tkd
nla
fad
nby
011
135
139
bys
ebi
p24
ndt
dha
djs
vka
yxx
gfx
h2o
amb
rdi
npi
eue
u88
gla
uai
uwu
ntr
lcd
gup
spo
spc
noi
rla
psa
loy
egi
cee
nab
fle
awe
yaz
zsw
bmw
moi
yyy
pco
nsi
rdy
yso
yse
gme
pik
aho
msa
ejo
eja
rkt
rky
ibu
oua
rpc
hog
hoc
vex
sah
ivy
hea
jee
ucn
s2u
yro
nok
yis
ccs
oza
021
rsi
hno
unn
kok
-go
gco
dax
wyt
568
aju
nmy
pme
ppi
uye
noy
rlz
hod
jen
zie
rbe
laz
ntz
gno
nid
spr
ghe
003
awy
lud
bla
dip
bou
maa
kki
pax
twe
gat
lda
oen
mik
toe
jyw
icc
hug
pra
alc
gry
mbc
saz
vio
n88
tpi
nre
vol
zio
wee
noe
nob
zal
eir
src
iyo
ecc
arq
otz
eaz
dof
bac
unu
opc
nys
hya
sbe
gad
bir
peg
mkt
ddd
444
sfm
vre
288
4x7
imm
gop
toc
nur
mur
nni
sql
msn
ppl
ebu
ssc
hir
taz
y88
-ua
dso
nze
mbe
smc
tpc
cic
rdz
-fx
xen
nbc
ypc
emd
530
uzi
sox
ffy
ycs
wig
kea
268
xsw
mem
doh
165
kko
afi
dak
oar
toz
elz
elu
ppo
ahl
uyu
izu
koy
-ks
iec
taa
vcd
bbo
bbb
fog
i88
lre
onf
on1
yzx
vik
exa
npo
npc
ofe
nvy
aky
kto
wax
gur
htv
zak
yth
srv
cep
asc
lil
kaz
rut
flo
wil
028
ecu
yap
gyu
uny
oog
ooh
tux
mmm
tow
iin
daa
rci
pim
ubu
kul
bst
csa
yuu
an8
roz
arg
cay
joo
bba
699
blo
xml
uil
scc
vim
tli
yra
wai
gut
cac
noc
zas
awg
265
069
gyi
dil
dif
nyx
nye
tov
lyw
iit
mug
anx
nbo
arx
ouk
owy
258
fou
osk
ugi
cto
aes
nku
lcs
wad
uza
usb
soy
yol
nag
wim
-hk
hly
sny
lur
263
upi
dik
lsh
orz
orr
deh
csi
ryu
kol
gai
ldi
cul
oel
mim
kur
nuo
mut
rny
fas
duk
yli
-id
sif
lti
hok
250
-sy
-st
olz
vez
aab
lxs
zic
a88
ofi
s21
-de
er1
tna
lcn
zap
yim
loe
stc
stl
lir
oxi
utv
ecy
shq
yah
yaa
kir
ayz
kkk
hys
-4u
bix
pea
nsf
y24
-24
dac
pgo
nje
mup
361
nns
mya
hoi
atm
pri
haa
sok
jim
saa
sag
niu
vix
ugg
g88
odu
tss
daz
eok
s10
tey
kta
epr
xyz
pse
dwe
cea
yms
769
idd
esc
yca
wie
meg
tya
rul
fla
orf
imy
hex
koi
966
bim
pei
ufo
mka
ysy
esf
pir
equ
lbe
nuk
p2p
ynx
efs
hik
ixe
gre
sid
dwi
eju
fie
xim
hac
hau
sop
dda
cpr
xor
jer
iju
jak
wet
lav
cog
gni
xyy
noz
512
yts
ucu
lod
e21
kad
yma
766
ycn
x24
pys
urd
ngy
yad
266
080
iwu
moz
dei
rav
ewz
rga
n21
rao
ocu
tig
ssl
lno
ehr
jus
sui
rko
yny
efx
arf
dur
rir
bya
g4u
tcc
owd
mgt
fir
atl
fra
hap
156
hew
sew
878
cig
cid
ckr
qtr
lgi
phi
tnt
coa
lch
spb
aon
mbi
noh
odz
gji
rji
e2u
e-x
kco
rsh
mgo
104
ovy
160
yem
opu
oie
gga
gab
ewo
nsh
reb
bur
kum
ajo
bsa
xxl
gix
tiq
ppp
iai
fcw
uyi
dyy
sst
755
758
oye
uuu
swe
yly
hki
tae
rpa
tci
qin
ipc
218
sce
tpr
rdu
lel
lep
gbo
kre
cma
d4u
ogz
hre
coy
aic
ao8
ghs
nou
mri
cce
nmi
mpc
mph
mpi
ffa
ymo
wol
naa
nao
ozy
shd
luo
kem
zul
iqi
828
pcb
lbs
lfe
s3d
gmt
pio
oas
ieu
lba
eph
ywe
ppc
gim
roe
pov
alp
llu
teh
oyi
-ic
fem
usk
tav
siv
991
jos
112
vam
dsa
qiu
zos
tvn
ddl
acu
aah
hgo
crc
z4u
pso
i24
ijo
eyu
xco
m24
lga
pho
xec
nkz
zet
371
ypi
xjx
cob
nif
zco
umy
uzu
sps
zat
tda
sra
lof
loh
mpg
mpe
cey
ncu
stx
oxe
gsa
nae
n-s
auk
meh
ovs
mca
ayn
ulf
nyl
snc
ixs
920
925
bmx
mop
848
abz
ggo
ryx
pca
uha
oky
rgi
oed
tsy
lfs
reh
-ex
ysw
tms
nwe
-ca
cne
tii
kah
pom
-ms
uwa
sbc
efy
rii
edr
rri
130
nfa
nfu
bbi
ske
fig
399
ir3
mey
a21
dhi
sot
s88
kha
ipu
tew
onx
rza
sab
ivx
gde
fta
afc
-fm
iha
ofm
bri
emm
cod
gzs
gha
nof
510
516
o4u
yok
uva
ifo
adr
o21
rsu
shw
721
aui
riv
kgo
isd
tyl
bax
yey
boa
orc
gea
tua
tud
tug
paw
oia
moa
edz
csc
968
usd
nqi
ksi
tje
ioo
iom
nwa
xxi
eyo
gto
dpc
yum
hur
an2
rmo
pok
gfu
faz
efu
duc
apu
tau
dla
ndr
dno
syn
lpi
255
olm
150
osp
g24
sae
598
sep
voo
i21
agz
2sc
mty
ihi
jzx
mcs
pty
aio
lce
obu
wah
umc
eku
dxs
rle
zad
mre
4-7
cct
ych
e10
iev
122
bco
102
dok
unz
nyo
tut
uju
gge
909
def
sbd
kop
kot
gae
ewi
nsu
ddc
dae
n99
udu
mii
sfx
-ed
yss
buk
oai
mul
-az
cna
gid
icz
yuk
fco
pow
ehq
eho
cbd
igy
hwa
t-s
sua
rkz
jiu
dum
fet
fex
bye
nfm
lte
990
117
owi
owu
nzu
wth
epy
mbu
nxx
sav
axs
k24
zmo
zka
l24
xan
nro
cip
zis
-10
dbe
-dc
-dz
e3d
556
i4u
yke
hta
hty
t21
zaa
nmo
veo
cec
puk
eeb
nau
lki
p10
wok
983
neh
baa
boi
flu
355
cys
mok
iuk
abr
syy
cur
tsi
kso
-es
cja
rcn
-cc
dew
occ
368
420
nnu
tki
n10
fac
faa
rki
jsj
e18
tgw
ylo
edb
018
oum
sez
joa
tca
116
mdo
dsm
smd
ir2
-sz
hag
155
211
gdo
ezz
hei
ohi
ohe
tru
h20
mla
lxx
evs
npa
npr
aed
etx
577
lge
nvn
er8
emc
yka
ykj
umm
ghi
nod
fff
gww
vus
yoh
wme
-la
ifs
773
rud
kci
o24
-hq
hli
sna
bez
buz
125
shk
auf
aur
upy
105
gys
dod
tny
pvc
uar
-pr
tui
tuo
pap
mou
moc
mog
gac
969
ufu
dag
sfc
kby
iol
enx
hyd
ahr
gir
msc
yjx
-ok
tky
lhi
iep
dub
014
016
-ip
-is
ltz
jou
hma
avu
tcm
kdo
115
113
ndc
vaz
ucy
p4u
8gp
dsw
kfm
ezy
ztv
irk
frm
sov
veg
vea
oey
dyn
099
ttc
798
0sf
ezu
ohu
kla
t88
k4u
jew
979
zki
voz
cir
agh
agg
dba
szx
et1
phs
zem
o88
hro
epu
cau
515
517
amc
amd
srx
d24
rwe
pug
lii
wor
aqs
bem
ngw
shr
zyy
987
dma
106
ne1
lwe
jjj
mcn
slo
-rc
maj
kku
r3d
uja
yyw
yya
ggg
ggi
dey
gah
nsy
reu
ysh
adc
esl
o99
gok
tog
-an
icu
yjy
iaa
hue
anj
an1
tko
ssu
suo
suv
sut
edc
dny
kfc
5sf
256
cub
a2z
azu
vec
ofs
zol
838
yze
ddz
dde
heo
scu
vir
itc
aai
rfm
cix
agy
ctr
exs
bts
kra
eoz
uay
dsy
emb
aie
niv
eky
spe
aoo
511
-ny
eip
d-s
sri
rja
mp4
vem
ncc
liv
kaa
e-1
rux
ozu
wik
ng1
otr
wks
upo
103
sjx
066
tyn
bap
omx
yea
228
-re
maq
gez
mmi
zjx
kof
tob
860
rau
sde
919
a99
mig
sfl
gmc
esd
-ch
-cd
env
gom
niq
ocy
-ag
eln
elm
528
rye
sht
gke
ssh
teb
nbi
dfw
778
e11
ylw
xpc
spt
ouw
avn
vad
mds
398
smt
177
ufa
fre
hav
gzi
nss
hcn
onc
rze
saf
axa
acz
cpm
cpc
cpu
smm
dsc
855
kli
aac
uez
nri
ctu
ctc
599
ued
lej
xcn
iho
ckz
-dj
teq
hri
emt
rni
spl
aol
aok
nop
tld
nmd
cex
tfm
885
lif
liq
660
gso
lko
-hd
cyy
rsc
urr
n-x
eai
eac
bcc
fha
upt
me2
neu
drs
sjw
dri
ygg
ovu
wup
-to
mcc
kio
yez
yeu
-ru
oox
ulp
lst
paa
abl
pcn
sbs
koc
bik
pep
zha
lyy
dab
lfa
u99
re1
ksy
tmm
bub
nwu
goa
nhi
toi
pka
-av
cng
t10
roh
roa
n3d
ryo
nlu
rmd
yhq
-ma
ehd
ehi
alb
llp
efc
-ko
677
985
edd
718
711
131
137
997
bbw
hmy
myy
uoi
atc
lpa
tze
fol
lro
r2u
fme
yzs
tvo
910
hep
scy
caz
x4u
sev
310
316
nrg
voc
vot
pss
ije
338
kpi
ihe
ckt
jat
aen
ofy
er2
uas
kti
-bd
co2
haz
gnz
aib
nii
lcc
bpm
pli
wam
guk
soh
eio
ccu
loi
608
cez
tfx
669
wia
029
hla
ngh
ng2
dpi
yab
eav
drc
ygs
wus
uky
i99
kky
gex
moe
eyi
csj
sba
181
cui
nsm
nsw
sda
sdb
sdc
949
589
emx
adm
dcc
cla
e99
toa
thu
thm
-as
lll
sxx
iag
huk
ssf
ehk
cbc
rva
wls
efo
dua
980
rrr
nfc
nfx
oui
vco
658
996
rpi
mys
hob
uor
fim
iau
070
soa
soe
ydz
ipy
ttt
hcc
sax
xme
ivu
210
tvu
tva
acg
acr
hez
scr
pby
xoo
591
978
aag
tpa
oje
pdf
955
h4u
tdr
571
ofc
350
358
-da
tma
in8
in1
eoo
r88
550
zey
ntx
y-s
hru
aig
bpo
mtr
iby
usf
noa
lmo
ce1
yty
gwu
ccy
578
amx
tdb
oju
hho
st1
std
881
ymi
wos
gsy
eex
e12
bea
ngx
lul
wka
zyk
xsy
a-s
dmc
724
hna
109
tyy
bae
s99
mci
wwe
zuo
hba
ooi
lss
kke
gei
abb
deb
qqq
gag
bib
pem
303
dap
req
586
-eu
-er
ysc
adj
tmo
dcn
zda
enk
imb
404
tod
nul
elk
hsa
hst
ahe
rmi
lna
poa
pti
ptr
fak
977
o10
-ka
iee
890
zzx
-il
bda
993
rpe
bbc
hms
lvo
cav
zao
fik
mde
175
xic
oqi
lpc
yfa
yfc
wtv
jkw
sod
osm
osc
815
ipz
b4u
ddi
uir
cpw
vii
itm
gfa
dfc
jem
311
zko
voy
trx
rdc
lzx
jal
jah
jai
ucc
uci
nva
jzs
eol
erf
erl
uam
xgs
ktr
cok
s69
xyx
wab
mto
aor
aev
ccn
nms
loz
mpy
stz
o-x
e-n
3gp
rur
naw
svc
lke
urb
ngc
127
tbc
luc
yay
otc
hns
upu
neg
isf
gye
tyx
tyr
kix
kil
xzx
vdo
zup
oic
uje
kmy
sva
488
r10
muk
cse
csd
pce
g99
2it
ewy
syo
jxx
wid
ldz
nsc
iis
gcc
gcs
afu
tsd
lfm
ilz
esz
es2
rcy
rck
ubt
kuu
kuk
clc
hyy
nhu
lbd
cyw
nub
muz
ahd
nnn
527
msw
mso
msi
msg
yjs
kyy
-op
ahu
lny
nbb
sux
616
gpc
pmy
efl
cdc
sii
136
nfl
995
ebc
gyo
ebz
dll
rpm
ndj
ndd
vak
hny
dni
uon
xir
-si
olg
oln
151
a08
212
hev
w88
cpl
zma
itr
evn
tps
318
sef
pdx
pdo
ijn
jcw
trc
cte
gba
tlc
phe
weg
nvi
jzw
inq
551
wgo
emr
c21
ai8
ofu
n2u
wak
mtn
hto
caa
nog
513
zay
yif
mrs
mro
iqu
vui
amn
yof
yos
mpt
m88
hhh
ffi
ceu
asf
nct
nck
kab
e-m
wop
adt
bop
768
eeq
rua
ycc
-hp
023
022
ecd
tbd
shg
lua
jlb
blr
upp
me1
ovn
xup
dov
le1
80s
kii
kic
zum
oou
zna
gey
paq
pah
zqw
wse
abg
okc
gaa
bii
ufc
pey
sds
lyt
goz
tst
-eg
coz
njo
-cs
gow
ocr
cnn
icq
ahq
kyu
e51
hud
t3d
nlp
dyo
rmc
lne
ehy
hwl
t-e
fap
fah
fal
tef
teg
sug
jit
dfx
jsw
cdn
aaz
lja
ljx
yls
siq
cxx
mfm
sku
vai
dnc
wot
mda
smx
dst
dss
sym
atx
lpe
prc
fri
tzy
tza
-ss
-sf
fot
3sf
g21
ipr
233
ttr
djd
933
saj
tvb
pbx
852
hzx
rfa
cra
cri
312
xar
xat
mly
m4u
958
l2u
-12
jab
jac
aec
etn
omz
356
kro
yrd
in2
eot
299
erh
377
ktg
-bb
hrs
hax
aix
epp
lci
pls
plr
mts
t20
aoi
rlo
zag
pno
wcn
mra
ei8
eie
575
amt
tds
loa
mpu
-li
asz
e-i
gmy
iyy
-ji
hby
dvr
kcs
ulb
024
027
147
707
ecr
pya
ngr
tbg
keo
a-t
-vn
eaa
m-s
686
jjw
tyz
k12
225
-rx
div
tni
bog
ooq
sne
lsy
iwi
wny
929
tue
pae
yys
pcc
185
iky
nsk
chr
daq
udd
coe
iou
esq
es1
rcc
oaz
nhr
eqa
vvv
elc
ppr
pmc
mss
vtv
huu
fcs
uyz
uyo
ssz
ssj
rmb
pox
ehu
750
fav
lho
ynk
yns
izy
i-s
pvp
ar1
edj
us2
sib
ltr
656
ebg
ebr
rpr
bbe
hox
uoc
mdb
smi
179
nzy
lpg
rgh
dhe
-so
olv
159
khi
811
vei
238
799
sau
z88
916
mna
b24
viv
595
zkj
nrx
mle
trd
mny
cno
cti
s08
337
sga
zig
-fc
-fi
458
ihu
clo
cku
aet
ucs
ofo
ofx
wex
mha
yrx
-ds
mcu
pth
cmd
cmc
er7
laj
375
wgs
nbu
teu
ypa
-sd
c24
epl
yki
mtb
e66
cag
caf
dxx
yta
-ng
eii
gjx
dzy
yom
asl
stt
887
liy
ymm
jsc
ruc
hi5
ycy
ngd
ngt
129
xsj
neb
mep
a3d
-tw
gya
gyp
doz
389
slm
slc
yeh
0mm
ooy
nym
lsi
iwo
-ps
y3d
gec
tul
bme
pag
ujo
mox
lky
ryn
csy
okr
sbw
mmw
sbg
mmc
koh
kow
ikk
fuk
sdn
xby
vle
chq
945
lfx
mip
ysm
cov
cje
iok
gmm
adh
tmd
pii
pip
nwo
en8
goc
lbb
nua
-ac
eyz
7ob
hul
fcc
an3
nlo
7sf
ssb
lng
poi
636
uhe
wby
wbe
-md
-88
rvi
alg
rtu
hiv
zzw
ixa
cfw
hko
apm
taf
kbe
yba
avm
r-x
282
ndm
dsf
171
698
hae
xkj
810
oex
ydb
yda
gzx
r21
fps
ssd
jii
hcs
onv
vki
heh
uie
uik
scg
vig
yxy
ftw
uey
gyn
crs
rfx
l21
xal
nrc
ijy
cib
y69
cny
uem
odc
pfw
exu
kpc
m21
aea
phd
eoc
er4
er5
450
ntc
mvp
ypr
aif
l88
obz
xyk
waa
mtc
htc
t2u
ekk
c4u
ibb
cah
dxc
aof
gho
mby
sow
lmi
zac
yil
mrc
qie
eiq
psd
gju
d3d
egt
pul
asd
883
o-n
o-m
ncp
e-g
289
gsh
760
e08
eea
aqo
esw
na1
wix
awo
mgr
a-n
720
aup
nef
nev
107
m-x
689
cmy
-te
gyy
tyc
omg
xjd
809
opz
zus
zui
tne
ayt
lsc
xly
201
928
qsf
hdo
pau
ujy
moy
mow
kma
ggw
ggr
y18
cst
csp
uhi
183
ewu
syj
rgs
cua
s18
bsh
mky
sdz
lym
afo
afy
afs
sfw
ksu
-en
ysj
025
fys
hls
pif
oaf
oah
oax
xfx
xfw
enj
en1
goh
goe
gog
eqi
lbo
543
nui
-ai
el1
hse
gif
cnt
msy
xzy
mgs
gta
eje
huy
anb
anw
gku
gka
ssw
dyw
poh
508
4vn
-mo
-mi
rvs
lha
e-u
s-s
s-i
770
rtr
ieb
hib
hii
arh
dux
duu
zzu
kdy
01k
710
-iq
cty
bdo
oug
ou8
733
hmo
avr
avs
tcl
vah
myx
xts
xty
xto
877
dsp
xii
xio
nzz
lpr
yfm
irn
fro
tzx
-sl
-sc
haj
bnk
nxs
tkj
ydy
g2u
p88
090
on5
zow
qoo
xmy
zpw
fio
uol
hxx
213
gdi
gdl
tvi
ddr
acd
y2u
ohr
rxx
cpt
eiw
xos
b2c
aak
bha
h24
ojp
ojy
rff
nrv
ijk
sxy
jcc
trs
evu
odj
le8
d88
-fu
ihy
-yu
bta
kri
-db
298
akt
fay
ypo
-bo
llt
nib
thd
538
yko
plo
obg
s66
xys
xyw
wac
uwy
432
ibm
ibg
cae
aoc
ghu
519
zab
4u2
-na
-nc
eig
uxy
uxo
am1
psl
gjj
sro
srs
e22
cef
cei
cew
ifm
pum
stm
o-s
oxs
dtc
gms
gsl
idt
ohy
lkj
tmc
kca
ozz
hle
beg
bev
urc
urp
tbb
shl
luu
kez
kex
kew
yag
wkj
zys
a-m
a-x
n-e
eag
dmm
bca
nep
sja
ne2
yge
mxx
jjd
-ta
dop
unc
161
zur
226
a10
tng
boh
boc
ayy
uat
mau
snw
lsf
or1
202
205
208
dkj
pab
zly
iup
wss
twa
cso
rym
uho
uhu
okk
uhy
vna
9cn
ldy
s12
nsz
nsp
usz
fey
chl
iia
gci
tsf
dav
dgy
585
ogr
320
ksw
t56
esr
esm
ubb
kuo
-ci
408
nhd
lbi
bss
bsc
-ar
-al
hss
hsy
thq
gik
tib
pms
1sf
msm
csw
1ng
tns
e56
pof
poc
502
v24
rpy
981
ehs
757
jue
t-x
ptp
nbd
te1
rku
yni
s-e
o11
gpr
i-x
771
779
rtt
riq
e69
le2
ixy
wna
edm
ed1
-ir
rrx
rrs
usc
9sf
us1
ouf
oue
ouz
ouq
ltc
650
ybc
031
731
r-e
tcu
ndl
vap
myo
xte
fut
txx
czs
hoh
dsj
smb
xid
oqu
yfx
wtf
gxl
dhc
-se
fod
fom
an5
302
ssn
ttw
ttu
jig
-qq
y99
797
blm
fmy
on8
on3
axo
xmm
uou
yzy
uoo
ohm
cps
scl
mne
856
yxh
itp
hzp
ueo
596
aaf
aaj
rfs
seb
seh
876
-3d
cii
y66
s-x
agd
uei
lef
335
336
npp
lza
ihk
ckj
dbs
jaw
jag
eul
etf
rbi
357
in5
akk
lae
zeo
kte
-be
-bc
thk
lcu
zcn
obr
ekc
cax
aow
aou
628
yid
yia
pny
4-u
etw
ccp
cch
tdc
nmm
loq
yod
d2u
d21
asr
hha
jly
ifx
pud
ncr
tfc
kak
kap
rha
bzj
667
665
ymx
e-c
wom
wof
286
gst
gsc
gse
gsf
gsj
cgo
aqa
utr
tmt
wic
wip
ecv
snu
ngm
shx
yax
yau
yai
zyz
zyx
262
269
a-b
dmd
dms
bcs
upc
lww
lwa
isr
typ
slr
y21
opl
203
4it
089
dii
boz
nyr
maw
141
148
mab
xle
826
tuu
786
imn
imt
imx
hdz
hdd
oii
oif
akz
0tv
wsf
908
ryp
koa
vnn
t99
ikz
960
syu
nsr
mke
lyr
lyx
lye
lyl
ddb
-21
ch1
gce
daj
l3d
ogp
ogh
328
sfs
kst
xdz
ysf
ilp
ioi
gmo
adn
fyy
dcd
565
oao
tjx
344
340
zdh
sct
kui
enw
imc
t77
e98
nhk
nhs
eqe
lbg
mua
kwe
-am
jys
icp
424
hsf
t13
kye
xzs
vto
ng7
yuz
n11
633
635
-my
t-c
alx
s-t
gpa
rtm
cdj
dul
ylu
wnd
986
a-p
2do
o3d
015
717
713
cfl
ibc
bdf
apz
apl
feo
siu
dwx
nfy
vcs
n-1
dls
jot
jog
uqo
fka
tce
skj
6sf
vax
vay
xtd
xtc
058
055
dnf
czx
czl
mew
a23
tzz
dhq
-sp
sje
foc
soz
azh
azs
816
ydc
230
231
ssm
onr
f88
zoe
zom
ivs
1cn
c88
gdj
a69
heb
ohs
cpi
cpp
scm
191
itx
fte
fth
tpo
tpt
zky
jco
uea
odm
ctz
rdb
zir
mjx
lzy
kpr
455
jad
jae
aee
579
tse
570
rbc
xet
yri
yrc
eom
eox
7th
zei
kts
jxc
op1
hrc
419
t08
c2c
bny
coh
thr
thc
sza
hte
aod
aoa
ghq
ghd
sob
lmd
zah
pna
ytw
yti
eic
rwa
uxa
psc
amr
dzs
yot
mpr
kzx
e20
uvo
sth
dte
dtx
dts
351
rhe
rhd
oxo
oxa
e-e
ydu
o-e
00y
-jo
gsi
e01
rhq
cgi
idx
kau
x99
kce
g56
-hy
l-s
rsk
rsy
beh
beo
ng5
shc
yac
dji
mza
zyw
a-c
a-r
x10
n-g
n-a
dmo
728
aug
meb
drx
isz
xum
jjc
doi
doy
om1
mch
167
yex
oph
k10
223
xww
iqq
iqe
iqa
-rp
fsc
snj
sni
lsd
orq
orl
923
0ok
787
hda
moh
mof
r18
wst
edv
edt
ggd
fww
y12
eyy
deq
pci
182
187
184
863
cfs
gdu
syx
ldj
ldu
bip
pew
308
ufe
8pk
mko
mki
sdl
xbj
917
jgs
chd
gch
tsm
tsc
tsw
dau
pga
udz
ksf
xdj
xds
cjx
iop
iod
fyi
tmp
piu
kua
i51
i56
wdo
-cz
goy
to1
qyw
bsi
bsd
pki
oct
nud
xxy
lcr
vvo
ywz
ahm
ppt
gib
xjy
yjj
kya
gth
ryz
ejy
iax
n12
505
d11
tzw
uhr
m99
753
t-m
pte
al2
fag
8ok
te2
611
ynt
wlw
i-p
e13
iea
fga
sww
x88
676
wne
wno
013
cfo
cfc
bdc
hkj
fec
132
nfi
655
p21
rph
ajy
tcw
tct
mfx
ndb
ndp
vaa
txh
mdy
396
smw
dsb
kfw
e50
oqe
lph
gxi
2nd
prs
dhl
-sg
bni
bna
umz
nxt
nxy
zra
ipl
ttp
dja
939
jip
791
hce
blg
onl
uks
fmc
yzw
yzz
k21
gdb
tvc
915
912
ohk
uim
cpe
mns
scw
smn
0au
dse
193
xol
klo
itn
hzs
ftv
976
y08
dfm
ugy
rfc
seu
nra
vow
871
sgs
agt
tdj
odr
lnk
cnj
djc
gcy
cli
jaa
-yi
kjw
lgs
rbd
s20
xee
xem
xeo
wep
nvo
inr
cmo
nkj
erj
uaz
uaa
ntd
mvc
coi
ai1
gnu
aik
537
plz
htx
spx
spw
spm
spn
aog
aoh
lmw
yik
pnc
0cn
mrt
ytc
ccd
n66
dzx
c-e
o66
o69
819
uvu
ffl
ceh
as1
ncd
ncg
stk
kac
663
e-a
e-o
o-a
00k
eey
eeu
ruo
pwn
fda
utd
fdj
svn
dvb
rtl
ulz
kcn
ycw
ycd
1tv
bfx
o2u
o22
ecz
8dy
rsl
cyp
pvn
pyx
awz
awd
urm
mga
shs
dpr
shb
lui
zyn
xsp
xsl
a-i
a-z
a-v
p30
n-t
n-i
eab
0it
dmi
hne
bls
s56
xhd
-tx
-tc
gyl
tye
omd
mce
kig
kif
yev
0pk
220
-rs
a11
a18
fsa
dib
diq
tnc
cgg
hbe
cgs
cgw
ooe
oow
oov
sng
maf
zno
825
-pl
gep
921
tuf
w99
oio
oix
x3d
xny
xno
yyz
yyj
905
dej
deg
csl
ryy
ryt
bka
zjw
koe
kod
ikt
fup
963
fum
ldb
cuo
bsy
ufi
s11
mkj
lyk
m3d
chg
iic
gca
tsg
euf
tsb
tsp
daw
emz
ogt
xdd
2pc
iox
wya
adw
adl
njy
bui
nwx
en2
nhc
ocm
ocd
jyo
icm
t18
hsh
hso
hsw
ywl
ahy
cns
pma
ttx
525
jdz
msl
mse
yjt
-os
ng3
yui
iae
iab
e55
t33
ia1
prt
fca
n18
poe
501
yhe
o77
-mc
jur
bbt
g-s
cba
cbs
hwy
fai
suz
n77
qcw
lhk
4pc
izs
wly
koz
i-t
776
cda
rtc
cdr
bfc
uus
uum
tgg
grs
caw
tcy
taw
taj
p-s
byt
nfe
whe
ebd
030
p2u
rpo
bbd
735
h88
uqu
ndn
lvd
0-0
xtr
lmt
txl
txp
czy
smp
dsk
at1
ztc
yfl
yfi
wtc
sjz
tzi
sjc
haq
foa
fok
305
azx
nxc
osl
lrc
ve1
301
ydn
ydl
ydd
fpa
098
jic
hci
on2
on7
xmi
xma
wpc
216
214
gdc
bjp
ohd
ohq
mno
pbe
850
wre
gfs
aap
uex
hgh
rfy
rfe
seq
zkw
vop
lxy
ijp
fzw
593
tdd
tdy
339
zii
xcs
mjj
lzs
m2u
-fl
ckx
szj
gbu
aeg
aem
btc
rvo
cvs
rbg
rbb
xex
wea
mhw
mhq
vst
gxx
-di
hpi
hpc
in3
477
gls
bry
bru
erw
s77
uao
laf
ntt
ntn
ogx
-bj
op5
hrm
412
418
cof
thx
epk
thg
epd
rnz
532
531
zca
ykt
obl
waj
wav
b88
mti
yve
spk
aot
n08
aex
zaz
yie
wcs
mry
ytz
ytx
ytu
-ni
-no
vum
vur
uxs
uxx
ccm
gjg
nmc
dza
dzw
606
wmy
egr
uvy
uvs
ifc
pua
stn
o-z
lih
kav
rho
e-r
o-g
o-i
280
gsp
eev
eec
cga
aqi
naj
svi
cdo
ycg
o25
o20
xqw
-ho
-hi
-hs
cyg
bew
ttz
ngk
124
shh
dpm
keg
djx
mzy
260
xst
xsd
xsh
a-a
a-e
swl
eao
sws
h99
auc
aub
neq
sjy
xhw
m-p
m-m
m-a
ygy
ovt
g10
1xx
248
isl
isg
jjl
-th
-ti
068
gyw
hjw
hjy
un8
omc
fny
slf
lqx
xjh
806
yel
zuk
k18
fst
tnz
ma2
zny
lsw
lsz
or2
zsf
zsj
204
-pa
-pi
hds
hdc
pav
paf
xnc
yyu
r99
hfc
l77
okz
sbr
zjs
eqo
dgl
syl
syw
syz
oea
uft
oee
sdf
sdj
lyz
lyg
i10
iip
iie
gcw
948
tdz
tsh
euu
ts1
lff
lfc
lfi
u98
pgs
d99
nqq
miu
sfa
zfw
-ec
ohl
cou
ysk
ysl
ysd
cjp
wyn
gmi
adf
esx
dca
oac
oau
8tl
nws
s51
scp
wdy
enl
imd
wso
nhq
pke
pko
545
nuc
mui
-au
jyu
jyy
jyj
cnw
icg
icd
hsd
nnc
llf
ro1
e77
jdw
jds
d77
msj
yjd
4th
o58
gtr
gts
gtu
ppv
dpa
yur
yut
t3g
anl
anm
anr
ss1
dyr
dyl
rmg
lnc
pog
503
nnt
yhk
756
t-1
rmx
igz
igm
hwe
t-o
alv
nbj
oyx
619
610
lhe
lhq
e-t
s-c
5tl
i-a
775
jsp
jsy
jsz
-ku
e17
iei
iej
ieo
hig
arv
ar2
tge
swa
swz
lje
yll
grc
edl
019
cfx
bds
hke
045
usp
fez
8cp
taq
dwa
ltt
ltx
zxx
xrx
iex
dlo
730
737
mfa
ndw
owz
hnc
xtx
wjs
txo
050
dnb
dnt
jmy
w24
626
8gm
mdi
dsn
atn
kfu
xif
691
yfy
wty
tzs
dhu
dhs
-sm
-sk
fop
fob
sog
soi
qip
veu
lrx
ydm
ydj
oeu
ydt
ydr
k08
236
ttm
jil
938
931
djo
hcm
onj
onb
ukt
zoa
zov
xmr
xms
833
830
uos
kja
zpc
yzt
wpi
215
219
21c
a66
ddv
tv1
bjk
bjj
uio
cpk
cpg
dsh
pba
195
xof
xod
xok
b2u
851
kls
isj
499
1aa
590
971
cly
aau
h21
hgc
exl
ugl
313
se2
pds
psj
voa
vom
vok
amg
lxh
kna
-30
k66
jcn
evc
odb
n69
ctt
330
sgl
sgc
xcc
npd
npu
vmm
i01
-fa
-11
ihr
k88
ckm
dbc
dbb
szy
jaz
jav
gbe
gbb
aeo
btb
et3
etl
576
cvn
zgi
phu
ofl
353
352
mhe
vse
jzz
-dm
-dg
t66
inl
eod
eop
cwl
cmm
-fs
077
er3
nkt
nkx
tnb
uws
zeh
zez
s45
373
xgo
nt1
kty
ypx
tev
-bh
e86
416
9dy
ilh
bnb
niy
n22
n20
epm
rng
533
ykc
obc
mtu
mth
mta
mte
kva
yva
hti
ekz
431
fbs
fbb
uzy
ibl
ibd
spp
aoe
aom
aov
lmx
622
wco
ytt
-ns
hvn
cck
d-e
aml
dzz
yoz
yop
mpp
mpz
asx
3er
hhs
747
hhc
hhg
egm
ioz
tfl
tfs
rhr
o-1
ymp
e-2
adx
283
jpg
-js
3go
gsu
eeh
ruh
bgo
idn
idb
dva
svs
v10
cdi
ycl
645
r23
ecm
0gm
8dj
l-e
rsw
rsq
fjx
urz
2sw
ngl
tbo
keh
kep
otm
otb
wki
zyl
zyh
xsc
xsa
a-o
p3s
n-b
eae
dmx
bcm
aua
10k
meu
sjs
drv
yga
680
xur
jja
-ts
060
tym
tyd
tyu
doa
omj
380
s91
sls
akr
lqq
opd
vda
fse
fsw
jhw
fli
nyz
nyw
lsl
820
orh
pwr
iwe
c99
-pt
p98
927
dky
dki
bmy
paj
zla
zlw
yyo
0th
iu8
wsa
wsi
wsl
hyt
hyw
901
twl
twp
eyr
twy
csf
mml
sbi
zjj
rrl
869
ikr
ikh
gaj
ceb
7it
ldr
cux
l18
s19
ufm
s13
304
ufs
usm
nsn
nsd
zhx
lyd
xbe
-gh
448
chm
chx
iio
daf
n98
re2
nqu
mif
323
miq
zfs
zfm
muc
-el
vro
-01
ysi
ilv
ioh
ioa
njx
dcp
rcu
oaa
zdy
xfm
wdc
ku2
enh
enp
enq
yqq
gof
gob
gox
bsl
bso
ra2
ra1
nuf
s79
f10
xxs
xxa
xxo
lcy
elx
icb
t11
hsc
hsi
hsp
eyf
ic1
ppk
ppu
gii
tih
roj
n33
n3g
icr
icv
pmp
l99
523
529
msp
msk
msf
ms2
yjo
b99
ryl
xzz
ppz
anq
dyz
ssp
ssk
dye
rmt
lni
rmm
pou
1qq
-mx
-mt
cbb
igt
e30
t-t
e3g
t-a
t-h
t-i
qcc
oyy
lhy
s-1
s-r
i-c
i-r
0dy
772
-kk
cde
rtn
rtx
e15
ar3
uup
swx
duz
n56
riu
rih
lji
ljj
zzs
ylt
ylc
xpr
qer
gru
edn
edx
cfa
hky
apd
usx
8cn
byz
byy
ouo
ouc
vci
4tv
ou2
zxy
ybs
ebk
dlc
jol
rpl
uqe
r-s
r-c
r-m
mfc
tcp
tcg
tcd
n01
skc
dqc
kds
nd1
ndx
owm
vag
myw
myn
gau
xta
r66
y80
czj
uot
uok
mdl
mdg
393
smf
172
dbz
xil
nzs
t35
nzl
oqa
vgo
lpw
696
yfe
wts
251
sjj
dhd
a2u
jkj
jka
bno
umn
an4
s82
s86
azn
153
xky
nxp
nxa
nxl
osx
osz
nlv
813
ve2
oez
ydx
iph
ipm
gzc
fpc
dyh
jib
jio
jik
jir
hcg
onp
onq
rzi
zoz
ns2
kjs
gdz
ezs
tve
ddj
914
acb
acm
heg
hec
scd
smk
pbg
859
yxl
itg
itw
wri
hzy
gfm
gfc
ftx
ftz
jeu
j88
jed
aav
bhi
bhs
067
tpp
317
pdc
nru
vog
sgt
959
950
agl
trm
trl
evt
trv
597
rdt
rdr
ctx
rdj
sgm
sgd
sgg
ziq
xcw
xcy
afl
nps
mja
mji
2sf
lzb
jug
ckl
gbg
aei
llw
573
tsz
lgg
rbu
rbj
phr
wem
mhk
i66
krs
jzy
yrs
-dr
-du
inm
cws
mcp
296
qit
brb
nkc
nkd
nkr
erq
559
553
552
zep
zex
pjx
nt3
cfm
ntp
ntw
ntg
mvn
ktu
jxy
jxh
yph
-br
y-x
wzz
emf
eml
emn
ilb
ilr
gny
gnc
nih
zcf
yky
obd
xyd
waz
waw
-ly
3at
szz
guz
htr
e68
t28
ekr
fbi
ca1
ibj
tja
zax
wcc
mrw
o45
-nj
-nl
-ne
gwa
vra
tls
e44
eix
eih
eik
0am
ccf
d-m
pst
psu
dfz
tde
nmg
srd
srj
dzh
sru
lo3
601
awu
yog
lgc
mpd
dtz
mpl
jrc
veh
745
hhe
e23
ffz
ffo
uvi
cev
asg
st2
tfe
882
o-o
kae
oxc
e-f
e-b
e-l
c10
gss
vya
hjx
765
ee1
ruf
rhi
aqq
aqe
aqd
pwe
fds
fdy
svr
tmb
hi8
kcc
kct
yci
648
wib
wir
r22
o28
4sf
xqt
-ha
701
145
snt
l-t
cyc
cym
bef
bep
beu
560
pyr
fjd
e-h
a09
tbs
dps
shm
ttl
kec
yaf
otk
otl
otx
mzz
zya
xsx
k-s
a-h
a-l
p3d
a-f
a-y
xdl
a-1
2cn
eay
bct
upx
me8
meq
ne7
c09
244
-tt
a33
gyr
fmd
tyj
dob
omn
383
386
omt
mcy
kiy
xjc
xjj
xjt
803
yec
opk
iqo
a12
fsd
fsl
ayd
ooc
nyy
snl
lsp
kks
3rd
buu
zsc
gte
iwy
-ph
p99
a77
geg
926
tuc
783
qsw
bmt
oim
zli
378
yyr
-69
0tl
r17
r13
iut
wsd
wsh
hyn
wsw
abd
907
twc
de3
a55
h18
jdc
ryb
bkk
okt
okh
zjd
kog
x56
gds
lbz
gav
y77
syc
syb
dgw
ldd
cuz
rgc
rgl
l11
307
-by
nsb
nsg
sdg
zhy
lyf
lyo
lyj
i18
-gd
-gr
-gz
vli
vla
vly
m35
chc
chh
chk
chz
dww
da1
da2
tsl
tsk
a98
a91
sih
ts2
587
582
re8
re3
pgc
s33
mij
mib
326
sfz
sfy
kss
ksd
mub
367
468
466
t55
ysg
ysz
ysp
cjc
wyw
ad1
dcl
dcj
dci
dcw
bup
pib
piq
nwy
kue
xfc
ku8
e90
hyo
8it
eqq
zxs
bse
pky
363
d56
kwl
xxc
xxh
elv
el2
icl
t12
ahh
pyc
sqw
llh
icx
ll2
pmg
pmo
jdy
7ok
u3d
msu
yjh
yjw
csm
-oz
tba
vtc
gtd
yul
yua
hu8
iay
t30
anf
tke
n17
rmz
yho
yha
yhw
-mm
gvn
752
jum
cbn
cbt
tlu
al1
nbg
lhs
lhr
s-m
wla
gpt
gpw
i-b
i-i
i-l
w4u
efr
jsb
jsm
jss
-ki
cdf
cdm
cdz
e16
ieh
f24
fgo
tgs
tga
dut
duw
-13
riw
qan
895
ljs
u-h
zzl
zze
jlw
wnc
984
716
bde
bdy
hkc
pxx
feg
040
f08
ta2
byj
vcc
f-t
654
ybt
ybj
ybl
ybb
xro
2be
joc
rpt
bbl
738
hmi
uqi
fkj
r-t
skt
sk8
vab
ow2
""".split()
for index, name in enumerate(POPULAR_SUFFIXES[3]):
    SUFFIX_SCORES[name] = (7566 - index) / 15132.0
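# Editorial sketch (not from the original module): the loop above maps
# popularity rank to a linearly decaying score, so the top-ranked
# 3-character suffix scores (7566 - 0) / 15132.0 == 0.5 and the last
# ranked entry scores just above zero. Assuming unranked suffixes should
# score 0.0, a lookup reduces to dict.get; the hypothetical helper below
# only illustrates that pattern and is not part of the original code.
def _example_suffix_score(suffix):
    """Illustrative only: popularity score for a suffix, 0.0 if unranked."""
    return SUFFIX_SCORES.get(suffix, 0.0)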
POPULAR_SUFFIXES[4] = """
shop
line
tech
roup
soft
club
life
edia
news
host
mail
home
blog
land
ight
info
live
sign
site
love
city
port
book
orld
usic
game
team
ames
king
ting
tore
list
link
tion
plus
corp
free
zone
bank
card
jobs
talk
work
ster
time
atch
view
girl
sale
show
orum
lock
town
tube
park
mall
hina
able
ance
care
ouse
data
food
here
play
spot
ring
pace
help
post
cafe
otel
oday
core
nter
loan
ndia
wear
ower
band
ound
code
room
page
best
film
test
deal
over
mart
plan
star
tyle
mark
rade
udio
ones
oker
arts
date
auto
rain
tone
fish
ball
ideo
ooks
hair
cash
ates
call
ress
well
face
ards
base
less
trip
ages
wire
pack
ware
down
adio
fire
tour
ales
oney
ings
head
more
name
avel
omes
tree
race
lace
farm
fast
ites
find
west
sion
stop
sell
uide
cell
clan
arch
rate
file
pass
blue
easy
ding
fund
safe
hone
kids
hoto
good
nder
ents
chat
ater
orts
ride
porn
long
away
anet
ever
side
iles
reen
gold
lady
load
wise
form
baby
mind
road
2008
ours
door
wall
eads
ters
look
idea
edit
back
orks
ines
lass
wine
hill
text
note
feed
save
hack
pool
word
eyes
ream
part
sure
cast
body
ling
igns
edge
area
scan
zero
eart
flow
real
ming
main
inks
golf
this
each
ease
gain
bits
rock
-web
hare
oint
ways
wind
ship
hunt
oods
wang
bear
logs
ness
cars
ning
ools
uild
case
-net
ania
fair
only
hell
ocks
mode
eals
4you
rint
ties
otes
vice
open
path
hall
cure
hand
obal
gear
ands
rush
read
ture
tory
ovie
rack
roll
rand
ions
mine
five
hits
pain
bill
days
made
peak
that
ifts
enet
bile
firm
tart
byte
desk
-inc
move
nice
walk
tune
next
east
alls
arty
hope
mile
ital
icks
oard
hing
oman
illa
mage
will
tuff
high
meet
labs
heat
hole
keys
type
rest
like
toys
asia
vote
foot
step
tock
rent
wood
rice
ills
alth
lean
tate
ords
rise
lead
drop
lick
lash
imes
wiki
pick
loop
wide
boat
full
rive
outh
oice
orea
hang
chip
last
irls
lift
agic
snet
tage
logy
know
lots
sinc
ders
copy
send
cent
stem
bars
unit
cost
rect
done
bids
eman
ange
lite
eats
fans
vent
wish
iver
nine
snow
luck
cker
cool
ergy
fine
pipe
pics
ilms
ogic
orth
ense
gift
enow
tell
eeds
ille
hard
rket
poll
vers
user
bets
acts
task
mate
late
otos
rust
oans
kill
tter
ader
etal
army
2000
lies
hool
apan
tors
tank
leaf
four
ther
opia
kers
even
disk
cing
ears
buys
unds
ares
fact
size
soon
make
alty
guru
ndex
trap
sweb
jump
omen
onet
what
tape
turn
come
wash
odel
aily
ller
root
lets
true
mily
orce
ries
ends
fest
ebox
inds
ence
gate
lack
wild
take
ator
mass
feel
feet
with
ving
2009
rite
tips
ells
item
pair
risk
ouch
aper
acks
ctor
chan
ross
ping
grow
tems
hops
andy
ment
junk
irst
self
deas
disc
turk
some
shot
deep
sing
comm
ando
inet
yoga
lane
bite
olar
erve
rown
crew
boys
tool
sart
hold
aweb
ella
gone
-art
boot
pure
dies
appy
tudy
hite
ward
rnet
ucks
eens
need
uard
alia
akes
flat
peed
stay
hour
ield
ists
sage
bike
tong
slaw
wing
eweb
aker
sort
rtal
pple
erry
draw
ider
rica
wave
aste
plug
rage
flag
nger
hoes
rder
earn
egal
diet
ocal
raft
rver
arks
hong
suit
tars
ners
odes
heng
fice
vies
wins
serv
teen
aven
yuan
fall
intl
tics
ered
reat
bugs
arte
expo
nion
eway
them
ails
lips
ency
vest
8888
buzz
fill
ides
eone
moon
sets
pets
tand
egas
rums
-usa
song
your
pper
foto
nner
itch
esex
wife
rman
onic
cuts
rama
reak
ains
lend
ista
orno
ruck
laws
dark
zine
elaw
amer
nova
paid
arth
eyou
uest
year
osts
resh
tnow
pile
most
dump
sold
ints
rans
ical
kind
camp
ette
addy
peru
alon
tnet
rose
folk
chem
ique
4all
term
nnet
cams
ount
rope
cene
ycle
urce
ntal
ecom
ores
mlak
einc
lone
rule
ebar
orse
used
anks
loss
cube
rick
moto
dogs
hine
navi
tick
bags
sino
razy
heap
ants
rich
keep
rash
elle
ogle
eans
rime
surf
fail
lans
sexy
alue
taxi
skin
anda
many
ffer
into
ecar
very
vids
dian
uddy
cket
emix
once
want
push
oors
tive
gent
ingo
thai
eout
tall
give
nweb
gers
join
hink
tees
ying
ures
aint
ebuy
tels
cher
ices
dent
emen
lant
ejob
nlaw
deos
aria
-pro
cape
unch
sent
atic
iran
auty
uote
yers
eboy
pros
seen
left
alks
cope
nada
ayer
lose
oads
lers
clip
chen
aris
ello
lder
lion
rank
from
fear
elog
bell
ainc
lity
taff
shit
till
epay
xing
ives
lost
heck
fang
abel
iner
hows
mers
bang
cold
roof
nite
iews
ooms
plot
rame
orms
nart
harm
comp
aces
sman
hero
aves
cale
boss
joke
oing
rner
lnet
lish
emap
cess
tbox
loud
eday
2010
ches
ngle
maps
epot
ngel
eady
edog
arez
dios
lice
ryou
eace
etop
miss
ecat
ited
rant
oweb
tron
oach
ango
pays
tman
hire
ules
uses
eria
oves
sers
roxy
pull
oung
rink
ston
hart
orex
prop
soul
ntry
ruth
arms
iang
none
hide
trix
icon
rial
rson
legs
pply
hips
fuel
arma
mama
ynet
njoy
uman
pert
nson
iler
odds
ging
mann
anch
oxes
lian
dare
llas
togo
ford
pers
xtra
yapi
gets
tail
cart
fate
warm
yman
ytes
nity
webs
eeks
sone
fone
eset
ject
ewin
enew
larm
efit
yang
tran
mith
apps
ally
ases
dnet
ghts
iweb
loor
tcar
ourt
heel
aber
eter
rong
tops
uang
voip
unny
lect
flex
meds
dels
ront
obao
scar
slip
cial
nara
erts
tang
amps
hear
stic
mber
lear
hion
gger
kiss
gram
tile
tout
blow
grup
lert
otor
quan
ious
ebit
olor
unes
rail
sbox
aver
urch
arry
much
oles
quad
aser
poet
efan
lake
ople
efly
role
ushi
kets
tice
sons
efix
ninc
ondo
laza
guys
uick
eams
jack
raph
otto
thin
sway
fuck
wers
oute
epop
ault
ekey
eros
lley
tend
dart
pots
kins
asis
ugar
rlaw
eall
dead
tlaw
sbar
limo
yart
1000
susa
ebid
rweb
tlet
seat
harp
eair
zoom
ause
rian
slow
ikes
otal
izza
-car
feng
hain
ible
coop
tral
ante
teel
just
neck
idge
bulk
mess
lson
yday
ejoy
-sex
tair
tats
bird
eguy
rden
ires
poem
tbar
said
mill
ocus
oise
eage
anna
cats
acom
sand
pany
tica
ancy
eoff
deco
near
lair
stan
mics
ados
upon
dlaw
reet
rkey
nkey
dget
utos
esee
oner
rare
past
tian
heet
ccer
evel
asil
ugly
ttle
liao
oinc
bout
iest
fits
bles
hana
hild
rops
berg
nics
ager
lbum
rena
cean
ated
rone
ests
nets
eels
asic
tyou
says
prod
unet
eten
tjob
ylaw
casa
ebay
anta
ebus
dear
tsex
wyer
eath
-one
rter
viet
chic
ebug
beat
lery
dmin
pill
beer
abes
nnel
leep
plex
visa
anga
ison
sake
imax
bury
orge
itas
dman
rend
dinc
syou
deli
dell
tweb
sall
reme
opic
ecan
elly
tlog
tery
have
hate
lain
toon
nica
debt
eson
hers
etoo
elet
oose
half
indo
when
aman
eusa
etry
ebet
aith
tinc
arab
rine
wolf
hree
ybox
tfit
itor
sher
stat
ente
ason
eing
flix
ehot
ausa
itle
oken
cake
tdog
eled
ofit
runs
wait
rees
tits
swap
nair
hung
isit
hile
gang
exas
torm
endo
logo
tcom
arge
srus
kart
ades
ndon
stor
muse
icom
nest
mple
cinc
ichi
nect
anes
keen
ento
rist
tals
nbox
apes
oats
nusa
hint
inch
oglu
elay
rity
tbuy
rinc
etax
dude
olic
eget
hape
onia
dish
amed
ewar
atel
busy
mans
ador
haus
dbox
then
aine
lbox
adds
sson
demo
sult
dong
ooth
lart
nage
etwo
nsex
rill
nman
tent
unge
inal
wars
ycar
ffee
spro
sbuy
crap
tpay
seed
worx
gage
doll
rbox
lage
cook
ague
iami
kits
hirt
azar
ians
immo
oots
vine
laim
tact
yway
argo
rway
lays
adas
eets
linc
urse
babe
dout
-law
tten
olve
otec
-com
ular
sten
yboy
uter
eeye
eaid
eact
onto
andi
ekid
eden
yweb
slot
lang
aran
clue
imit
mobi
dult
evan
uite
rart
erra
sllc
tmap
euse
lter
hift
trax
inux
duty
inic
jazz
pire
felt
iary
bros
ucky
shan
ache
jing
esit
epro
iana
ovel
nday
ntor
bind
rall
enie
lman
rave
nate
ehit
itty
elow
huge
ynow
bama
airs
oder
ared
efor
oria
gnet
erie
sout
pink
maid
rror
math
boom
oool
elot
nton
hman
twin
hase
tfly
2012
chef
ntre
esat
illy
nude
ydog
inos
ishi
fers
cine
uits
lege
ubai
euro
lina
rips
toff
dway
aqua
asks
shin
rium
ssex
eoil
obot
tway
nies
iche
iece
mmer
raid
aday
zhan
vida
oshi
mini
romo
ecut
dice
fing
pnet
bean
atec
stin
nime
gems
inas
otic
docs
bies
diva
soil
tbid
2you
tboy
uice
ebig
iano
reed
same
omic
lent
iend
igit
char
cons
tguy
nema
dcar
mega
bone
agon
shoe
2007
dnow
uilt
nway
gles
ixel
ycat
mani
emag
elec
emon
tkey
hive
mint
elli
nyou
ryan
aler
erde
mnet
rvey
menu
etin
baba
erse
slam
hbet
eras
dits
eron
etro
kate
moda
inda
minc
cles
iter
olis
sses
rnow
ntop
evil
rang
bowl
quit
wifi
scom
gine
raps
tack
esta
kaya
ttoo
weet
aire
enti
must
ndco
obby
ruit
alog
lens
orte
trol
lton
smap
etea
rics
eice
ysex
rsex
tare
ebed
atis
hort
tbus
rina
ecor
tfix
anic
nuts
beta
grid
eate
tson
iger
epen
taid
acar
djob
hinc
ilan
-man
onda
anco
knet
ousa
mean
xico
alla
stro
odge
sday
wner
ecup
rove
nnis
gogo
sjob
onds
-llc
ongs
umps
oper
iser
eler
mold
tusa
iman
reer
-edu
anka
enta
ribe
dwin
rada
snap
2006
-box
oops
moms
alaw
tags
rugs
abox
oart
ntos
-ltd
eran
emax
tred
slim
bing
njob
mojo
dsex
nerd
amas
llen
kman
rker
tmix
ueen
esix
hoot
tana
oups
ogos
eaks
haos
olog
alin
utor
nail
nits
aise
rion
yjob
ilot
ican
hots
eher
olls
slog
irts
udes
sync
shut
enda
ncar
anel
ybuy
yage
yone
roid
shed
hood
ckey
rget
guns
urus
kidz
elab
2day
udge
amos
teye
llis
mere
lier
rind
ncer
tmen
sted
oons
ocom
nbuy
oran
sset
ient
gypt
9999
enot
acer
demy
mike
lubs
esis
spin
acon
eeth
emad
does
onus
stry
bras
oter
jans
than
lust
bots
0000
achi
fare
mods
undo
nker
stom
oral
agen
cctv
yard
tcat
ussy
tjoy
esay
tist
echs
sian
lweb
rcle
eers
thit
bali
yyou
ethe
cate
llaw
mist
cave
anos
llow
pact
drew
bond
kies
soup
tpop
saat
icro
lamp
soff
rbit
rize
vita
ixin
jian
tein
ckup
yfan
bomb
piao
ncom
cusa
yinc
nall
anas
helf
ypay
zzle
iren
tire
erit
sltd
ybar
esus
olio
rane
aart
rboy
sist
rism
onix
oyou
nbar
abit
rbar
rief
elry
dale
dyou
enus
lore
tura
pals
iart
adam
sana
lank
soap
edya
huan
plit
eits
vert
gels
anon
lsex
arin
econ
rass
tbet
mpus
lazy
tino
e123
dict
seal
lues
zing
cade
uare
eres
lynn
goes
rcar
itup
eder
java
ural
vast
esto
avis
owin
eton
ntai
mera
hore
eren
tfor
dbar
mond
aren
rton
nian
gals
isco
lcom
erin
lnow
tina
ored
stas
3000
apid
toil
pell
ymen
hats
itos
nsen
tnew
nama
anny
e365
hnet
labo
lord
lver
punk
espa
male
ract
ntan
ijia
idol
maya
caps
tday
lcar
anal
yair
orme
arim
vibe
ired
llet
seye
pump
anto
para
taly
lare
rnal
lime
dbuy
iste
nnow
abar
nwin
koyu
smag
glad
prep
duck
xian
ngas
knew
eply
nfan
irus
eand
linx
dson
atik
putt
skey
oins
roke
gest
tfan
ects
cock
cked
frog
atar
bull
1234
obox
barn
dpay
obar
ehow
eral
scam
rses
emod
oost
tico
dboy
told
orus
zona
chin
tbit
stax
anie
sair
pint
omed
were
cert
kyou
dumb
cept
edic
okes
thot
cweb
ince
agle
elax
lein
ttop
leon
spay
bush
erty
obuy
ncan
tons
evia
slab
swin
ario
gene
rtin
dair
sfan
rome
roma
burg
waii
rday
pinc
idia
lias
hara
alot
2005
lobe
aken
alem
nboy
tset
tera
1004
alan
tton
dall
echo
xiao
ehat
atan
tain
ntas
ttax
asan
-lab
koff
ntel
oset
cola
ibao
sfor
oast
soso
gray
itis
cnet
burn
fort
tale
olin
rado
rten
pend
tpro
tsix
flip
rout
poly
asex
lbar
obil
ewon
-bar
erus
uity
nten
kout
opes
tcut
dome
dage
iris
nfor
liss
gies
atus
llon
mont
eadd
poor
hlaw
dfit
else
aroc
ajor
cion
tgas
ngda
milk
pine
maru
asty
enix
gmbh
rris
kage
iday
aked
eper
iper
iley
smen
tbug
rosa
amax
ochi
ntis
apro
dkey
eask
spor
tmag
irty
fter
dial
ript
npay
nett
fuse
bore
eker
ybus
aims
tcan
lles
lled
eris
shes
omax
deck
yfly
nsky
olks
afer
itan
chon
bers
lama
rjob
tsit
erks
enia
dset
cord
ogan
ginc
trak
edin
rger
bble
lker
esky
ples
ulse
bada
rmen
klaw
lala
ntec
irit
aram
uyer
bert
opro
rtop
sity
afts
ione
gman
yeye
tled
lout
esum
itec
ssia
olid
ocar
hark
silk
ashi
oore
smix
ontv
-now
ldog
epal
sfly
reno
dfly
yles
osis
pads
-fan
esin
teal
elan
alab
pice
exus
ould
emos
etta
onow
etti
brew
osex
luna
zhou
o123
enny
ssue
llar
erow
inha
rdan
esli
rdog
aron
imer
dweb
iers
itel
stel
coco
itar
pare
-you
boyz
amin
rana
tric
ande
bath
ltor
pray
dled
dder
ntax
lway
ntin
5678
fame
uong
unts
ptop
dock
lbuy
nium
iron
axis
arat
uyen
gllc
6666
iber
dfix
cout
isan
kweb
ndos
omas
tele
efer
asta
vino
lark
mweb
mour
nfly
bnet
odle
ogen
tome
tkid
okyo
beck
onth
maxx
iled
nese
ough
goal
-max
orry
rier
anne
dges
yson
olaw
avip
yguy
tcup
yhot
mask
scat
iinc
ehim
lade
omer
owen
atos
etto
ndir
uren
mone
anat
todo
poke
ogue
dred
-ads
navy
lday
lpay
aspa
slut
ndas
they
bass
tual
coat
solo
omix
lari
alis
lame
klam
ener
runk
idan
ises
ntra
enis
dmen
tsee
esan
kino
umor
dvds
exam
enue
etic
icos
ntoo
kong
cite
sdog
dbid
noil
dguy
nted
ksex
arco
rats
ongo
cort
dles
rake
hiro
ykid
khan
sits
ybid
iker
ques
dang
tlab
arie
nput
afia
aide
rbuy
iety
dash
nows
luxe
iant
wake
outs
tern
lima
-job
dmap
rbid
glow
enor
uess
dium
yjoy
orne
urns
lfan
ilia
inan
rino
ttie
etie
ixed
ixer
ixes
osys
ycan
mana
anti
meat
papa
aidu
hick
rset
guan
rita
ndor
ivas
ytop
tary
lano
jiao
lyou
paul
icas
ytea
ktop
ykey
aner
erch
horn
cole
edom
ibox
azon
rmap
foru
aton
thus
shou
eave
sung
hweb
rnot
vans
kson
rred
hose
uweb
-med
alam
inka
ovil
pole
icar
inga
cpas
bber
ohio
elia
kbox
kang
fect
erup
ndle
came
ddog
lide
stle
emes
jets
mono
njia
lfix
thon
spas
mmit
mper
bebe
imed
isim
reas
wels
itto
sfit
nano
gree
isor
ngan
spop
quip
imex
imei
eric
aids
emet
egap
dfan
milf
uzik
ybet
nbit
idal
took
bees
viva
kent
tsat
ewho
cott
sexe
anli
etes
cuba
sics
yred
ywin
mash
tara
etty
pweb
uppy
hway
pies
spec
icle
nfit
hane
hale
inja
pins
mack
anor
e100
ndit
tabs
owns
shen
nmen
ddle
cret
ecks
ertv
mina
rbug
cute
mara
snew
ymap
usik
agel
lars
ypop
sina
binc
meup
tish
-xxx
hanh
inor
rair
elco
ully
isha
doff
asys
troy
nred
gard
sica
edup
pods
ilin
meng
pits
atex
wrap
enya
sguy
lboy
dmix
isex
rusa
also
mpro
lani
atin
tude
wski
ntex
ntea
rpay
eban
llan
born
buck
dive
nist
ihua
pest
6888
i365
umni
todd
bash
esaw
egin
rpro
olab
zaar
indy
lper
utah
dlog
sees
ayan
tine
rguy
nlog
dean
ench
tvan
rius
2020
esse
tlot
bach
idas
noff
been
anya
ayes
rcat
alas
ewis
ckit
edry
ocha
dens
dhot
dtop
inar
trex
ycom
ewel
bizz
kone
hout
tres
-air
zion
2buy
-tec
rmet
olds
alsa
lins
onor
paws
mble
dkid
dusa
ntes
oses
iwan
nski
abia
sled
rpet
n123
neon
lcat
tbig
ybit
gren
erms
amar
nmix
rida
hock
grad
sara
onde
sail
cans
0086
pera
rcom
olik
izer
izen
ndan
cabs
yout
pens
rfly
rete
rcia
fety
klan
izon
enko
leri
gida
coin
ipes
loff
tner
kboy
deye
yall
corn
sboy
lsat
rban
elps
etas
elis
ddie
trac
twar
rfan
loat
belt
asha
kbar
uche
u123
ljob
rney
atas
yusa
rfit
anit
kbuy
gong
dcat
lfit
vnet
ouji
emay
xnet
roff
dhat
ylog
buse
enes
drug
lusa
ttea
eira
nown
airy
cone
hani
ntro
24x7
anca
nlar
arel
vera
ouxi
axes
rgas
tpen
ibes
osta
dwar
ndog
erid
acle
pnow
tany
egan
dows
zard
tbed
lica
imon
rads
ials
lden
vale
sser
ngli
-way
vels
i123
unda
ymix
nsee
loft
amon
e-it
rbet
stix
lmap
amez
ltop
ygas
neng
hoff
nmap
qing
elie
elin
etec
amen
yeah
rvis
arly
such
avia
dday
tsum
7777
npro
rfix
ysee
nren
sang
allc
elee
okie
gbox
span
spam
laze
sper
mata
ilar
awan
6688
yama
rpen
sket
ytax
aret
dsat
onas
both
loom
leye
shat
urbo
stea
gion
iton
tles
ypen
tmod
aska
tini
stad
izes
ybed
ndar
rrow
tyes
ssen
rets
hnow
neye
urry
abay
keup
omos
lbet
aled
hdtv
bate
rely
ibbs
svip
gxin
nhot
tnam
ugby
pond
xuan
-sky
sole
nico
gina
elix
tlow
ikan
rary
doil
ffic
sual
teat
rael
s2go
anis
claw
dyes
raze
dcup
eleg
palm
lino
tied
tier
ebel
gays
nish
eend
izle
sive
ebux
otex
dler
reit
ecam
dbit
laxy
ased
olas
n365
raya
wice
yfit
teng
gods
dams
stal
isen
nspa
erto
gnow
lief
nguy
fell
rdie
alve
yber
mare
laid
tget
stik
ults
1314
sbet
neys
2002
alim
sbid
aten
rish
atsu
mesh
djoy
rcan
weed
beds
s365
lmen
obus
afan
lora
ngen
herb
unix
ypes
isys
mway
uper
dave
vate
zhao
dpop
rsky
pimp
ibre
lyer
alle
ylor
udit
hawk
kdog
lall
nbid
dago
anza
dsee
tuse
zart
lito
kare
arkt
tiva
lood
erme
tadd
nhat
nary
illo
graf
oled
egos
eshe
iten
altv
riya
-int
u365
dick
reye
ocan
mand
wman
nkan
dnew
ehub
stwo
lene
lmix
dbug
heer
-vip
kair
gusa
musa
onik
oven
eiro
rior
oids
polo
ycup
cove
ured
pons
tsay
rika
inco
nact
onal
inge
acht
exxx
etch
esco
nell
sexo
york
unky
mary
npop
rlin
ntic
aone
ysat
rico
eeze
dtax
aden
novo
tori
xury
phil
blab
went
adar
hics
bian
thor
pian
npen
eeps
hjob
lyon
pher
isis
dlet
wong
ibar
aura
litz
onne
aili
kara
toms
ndie
kjob
swar
ngyi
nlin
tmet
stoo
earl
ynew
alta
dojo
imos
otus
dcom
ayne
ssan
engo
tash
etup
ozen
pifa
ommy
traw
naid
alee
bobo
hyou
nset
nike
moke
atti
amma
turf
alar
orps
iden
heme
rati
tago
gyou
rlog
tnot
nick
fart
oche
redo
e360
ttwo
opay
sden
oyal
mons
tero
nedu
yled
lwar
yoil
nxin
tasy
omar
2win
opez
iven
mori
ofan
rmer
rble
emas
es4u
simo
nism
owan
nius
ebad
hbox
erer
ncat
chka
guia
aras
loil
dact
colo
llos
tmad
lita
edon
lpop
bove
azil
nbet
oobs
rgan
kari
sofa
olan
rpop
nare
wick
envy
roil
ajob
oone
welt
tler
imai
ypet
xbox
rbus
tant
rhit
erre
ried
etix
s123
kler
nlet
nhua
vina
vana
nden
ndes
sine
dbus
rjoy
kita
agas
esys
icky
rani
nika
niki
anyi
dcan
nesi
xinc
jeep
gart
hday
lwin
pson
ggie
dina
kinc
ipro
rios
uner
1888
irds
elta
clic
sedu
esia
rgia
olly
kcar
rled
pbox
nshi
otik
odka
yset
gacy
4web
plaw
funk
eles
chao
dhit
leme
loco
hrow
dpro
mars
bler
lbus
5555
llie
stra
rtex
ngit
atur
iraq
webb
pose
ivia
erco
nock
rtax
ecap
eraw
oble
lcan
ismo
nana
oppe
dbet
tads
reel
nget
ndis
nout
usan
cllc
warn
rook
cres
eare
urls
lfly
oren
kset
olia
adia
a123
mule
klet
nong
epic
mlaw
sack
ltax
vens
agan
sbig
maze
ndus
alex
ulls
mory
ueue
trim
rbed
nnew
urge
inis
lisa
s411
gala
font
acil
wawa
ipod
rous
knot
lors
atea
seit
-top
elas
gist
tllc
clay
toto
nook
mice
teer
osan
-buy
sens
racy
inst
icus
adog
5888
bala
iken
llin
ijie
pier
ttes
spen
worm
oyle
adin
atum
ybug
iusa
teks
aina
avvy
sese
scue
yvan
nces
nlab
nwar
ikon
hbar
nwen
imal
shaw
sham
desi
fari
nsit
rego
syes
sare
rwin
rtis
itic
tvia
nked
wart
eard
azan
t365
lawn
bbit
afee
ized
lave
dher
zers
kled
asts
peng
lpro
nhui
ooty
mbox
ogin
ampa
inen
cers
bins
lbug
asso
igen
-seo
vite
diaz
yung
igan
rell
trow
coms
ltra
html
mood
ycut
abin
lism
rata
rvan
chno
dgas
glam
okan
-mag
uros
s360
uise
prom
rape
tras
tray
okey
esme
lset
onex
thow
cult
ukan
dora
2web
eeat
ehab
ntit
ntie
teck
ngcn
dots
tama
isky
oban
lida
plet
ubes
arus
antv
geon
ncia
allo
odia
1688
jade
ahoy
mote
kmen
ival
usta
raca
ngin
dsit
lush
glue
neko
1818
rmix
qual
nals
scut
lbid
sfix
held
inho
dust
anow
tect
ekly
kway
cruz
yong
kick
nomy
pped
ngyu
yper
yact
akin
lour
lena
mcar
seem
lize
gweb
idos
lown
utes
itex
nkie
amix
reps
enco
grip
enas
nver
itai
iens
cken
asrl
esso
nola
flux
vely
alid
mica
mnow
ggle
tomo
dere
lure
alen
vada
enic
jean
gave
atta
coon
sget
2222
esel
kker
ovip
itus
lguy
lebs
nsys
kina
aboo
fied
pres
1069
uran
lsen
deng
xact
anji
lkey
cipe
erso
hmen
rons
sies
inky
jane
elon
lbit
rkid
emed
tika
rmax
mali
urst
rits
llus
ivan
pout
sock
atoo
asat
opop
nado
lary
blet
oosh
endi
phat
sbus
seum
mpex
-lee
borg
oten
bolt
erce
chit
chie
anci
mong
yfor
ayou
nery
efox
isms
anan
khat
lirt
ific
-dvd
hype
tlas
ropa
acha
stes
kday
oris
rift
wnet
itin
8000
auce
kfly
ntwo
hook
nyes
asky
neat
roms
died
reis
idis
dfor
eyer
werk
ymag
oras
alms
ssee
essa
yice
eaim
lara
nded
2004
sink
tide
beam
leys
aban
aths
phix
lynx
rnew
apas
nyan
rima
neer
yspa
acao
rray
tuan
spub
yule
ytwo
hsex
uels
dine
amap
dpen
daid
bart
atre
abor
obia
prof
rows
owto
keji
gins
inea
gery
elit
tism
-bay
ywar
clix
yhit
lego
teca
sims
-mp3
salt
-tel
dcut
olla
eleb
iwei
eany
ylot
migo
ptic
dsix
9888
emme
kmap
tted
ergo
ifan
rtec
wson
emia
noir
edes
iban
reco
amba
kwin
lits
lads
asas
nand
skid
nina
ybig
asco
aair
ejia
teur
imap
shah
gens
nari
pups
msex
yfix
aroo
orin
efar
utan
erim
ndow
arra
liga
dawn
tint
ellc
efed
-dev
ssit
iptv
mair
nhit
memo
repo
sean
mari
asol
aset
deer
gged
dias
coal
enit
nled
ndgo
ummy
acia
ngon
pset
inus
okup
kaku
-boy
nsat
yder
edvd
yads
klog
nous
rces
znet
onis
seks
pops
acam
heal
enon
yria
kend
lmag
rene
ipin
teps
etit
dist
inia
init
gars
mirc
pone
yhat
epad
emp3
2car
icer
2345
eier
elik
onen
osky
bela
ndry
rias
esol
hfan
lves
rfer
erby
iyan
edor
ncut
ycam
mimi
pman
nuse
t100
taim
vity
agro
reus
ften
eaux
ahoo
eurs
ytoo
intv
chai
tari
sdvd
thim
issa
cage
lily
rein
yher
rams
duse
iage
oser
ffin
oway
haha
ckle
rtea
ncam
arah
n100
advd
p100
hala
uhua
mson
nher
gasm
eold
imas
psex
xcel
iere
hlog
mero
ssoc
ywho
isch
lube
ubao
afix
t-it
ambo
digg
ndme
earm
t123
ybay
rays
gout
eyet
nize
nbug
vill
lats
a100
tsky
nold
eese
oair
xion
ngji
ypro
ipan
nago
icia
lsee
rala
sein
dera
amia
nita
vlog
-pet
dept
ymod
kpop
amor
lago
node
fman
riot
abet
trom
nvip
icam
eddy
obid
agua
duct
lkid
inex
elim
oned
kery
dlot
aoil
y365
onga
dori
lcut
rett
este
elen
tecs
ttin
mens
ebra
lhit
aded
zang
ncup
affe
rmag
torg
anty
yyes
resa
dtea
eigh
oton
otop
mals
etox
rlab
plas
digi
-sys
rito
lask
bels
hino
tarm
nads
reid
drum
oire
emma
hvac
nfos
rsat
worn
netv
bana
mgmt
vets
ofix
ofil
pter
kall
atio
yoyo
oted
tesi
trat
chia
halo
ndra
ukai
iyat
eite
tobe
mman
-pay
asam
itta
llah
dirt
arke
rypt
isol
gill
erma
er24
reef
0769
ypal
netz
ubed
rals
dtwo
isas
nard
illi
-dog
-dom
rdon
tens
hess
kice
llot
leen
oola
lous
tane
cana
s101
alto
omex
mais
arca
arta
olos
kpay
eram
asti
aged
nhub
keye
seme
qian
crow
taco
ythe
nghe
twho
anse
amil
gpro
enin
flog
bait
acan
ngtv
yget
stit
gulf
avor
atto
isle
luts
toes
pony
dana
igou
amag
empo
medi
dali
zeit
bois
cina
oyan
yler
hera
ntia
lfor
sier
iate
hony
ewer
jang
teas
kbet
ubli
meal
rsix
asse
1111
mbus
unty
brid
alli
agri
blaw
aved
etra
bios
lind
lana
rsee
shub
tudo
sque
ende
irin
mato
ched
1999
isme
iros
yten
anzi
ngus
conn
glas
nero
rwar
slan
lows
mony
fnet
utts
lada
arer
poon
iral
uler
yaid
g365
mllc
asen
arit
nhas
gend
lpen
avid
ecia
iscs
nata
oler
uban
dawg
cums
agra
tgap
mins
aboy
emer
esup
lien
ydvd
medy
neco
nkid
-ing
afes
meme
pdog
arti
eavy
nbus
bcom
obbs
ymay
rers
stil
sama
otti
hari
idee
nack
cups
nchi
retv
ngly
vang
amp3
oils
alik
stus
elux
dmag
lete
omme
nsan
setc
oger
rali
onie
onin
nnie
oved
iowa
onte
toos
yana
nseo
ilet
tria
3333
tcha
irlz
cops
duit
ovia
ovic
amex
kbid
lpha
esas
vian
teve
gall
obin
ysky
moss
rowe
gans
fold
finc
lred
volt
rapy
gler
rtho
cini
obey
ober
epix
ilis
anhe
pguy
ngco
tung
adel
elos
leet
kfix
maha
mela
abuy
iyou
otin
ushu
assa
dten
adan
leit
-tea
opal
sima
inta
ucts
chas
taro
phop
hest
busa
eber
shua
lvia
adoo
phan
tins
lung
lund
erge
lnew
2net
rtel
ebag
-led
9988
ytie
conf
suke
opin
trum
a365
enna
hali
kerr
ihui
llog
kbus
kbug
scup
angs
dsay
neit
tivo
grey
wlaw
n168
stec
nhow
ekit
dost
ngfu
itim
erio
arro
-cam
void
rcut
nthe
asik
cred
erta
kguy
iets
ejam
-ent
omeo
nkit
egen
pbuy
ekan
24-7
cado
alos
enge
uart
nbul
erly
tani
ltec
ovan
ened
mats
pllc
rbon
lele
flew
i100
derm
soda
lhot
opus
tris
nmay
nmag
-gov
nami
alad
poli
ilai
ilab
ilaw
pbar
mir3
omin
ingz
rost
kfan
heim
svan
etel
dend
ilon
cino
ofts
atty
s-uk
estv
okit
niya
laya
pall
alco
rley
hcar
adge
phim
otis
robe
tiki
carr
nsix
otte
u168
mora
axin
kkey
ecry
rmed
oals
azzi
rean
-fit
prix
aple
atie
avan
mies
ijin
-wow
meta
stre
-oil
nfix
rema
hank
evip
udou
inli
rmod
chis
reon
kura
cara
llor
ruby
yfox
xter
youa
esad
ikos
anov
anol
gner
dbed
hade
matt
ttry
rtie
roth
hyip
ledo
atix
enty
omap
dvan
ihan
koil
sbay
ssic
iani
saka
xusa
edtv
crib
anus
lics
dica
seas
ocat
azer
thes
8899
marc
eger
aoke
oove
5000
kles
artz
fsbo
ssed
goon
fist
mami
-all
engg
ndel
boyd
qiao
ymet
ifer
lamb
dmax
idao
apon
leco
nnon
eink
einn
deri
risa
rnes
aosf
onit
o365
alet
inow
icka
inoy
s911
nses
beef
dads
eput
fera
-win
atte
hama
8999
nbao
nbay
lius
ossa
ixie
anba
umar
ishu
ensa
goat
rlow
rlot
dand
akan
klub
pfit
pcat
3721
aham
ahat
miao
prox
nash
uras
rhot
blic
dopt
hdog
etre
boon
drow
heye
nges
-eco
dsum
ilim
inam
ybbs
bleu
loto
osol
rola
edos
pusa
lium
rkit
atal
zman
gtai
cita
mano
ucas
aiya
anin
rfid
exit
rsit
njie
masa
otoo
pana
nome
kado
hboy
ollo
erno
ngup
odoo
atom
ulin
isse
ofer
8114
ealm
yask
endy
ramp
llit
aits
ayat
chel
plog
o100
sler
isma
khot
odan
aldo
erbs
erez
bien
hans
drid
oxin
otea
aula
ccom
divx
aero
foam
raud
nuri
anam
onna
aile
iras
rora
toyz
-out
iong
i360
enry
orma
isos
2580
usen
rlet
nley
oron
lons
tcam
rids
lgas
dove
cson
uyou
ught
aque
vidz
nmod
ndom
ppen
asin
eves
kova
g123
olet
akid
tthe
utch
mino
imin
ksee
lowe
phic
shim
ngbo
hwin
ssin
ssip
ypic
pino
rcon
mune
arto
yoff
zinc
epod
dona
eill
sume
cnow
ibus
nbig
lsit
auer
sins
sact
nsai
kite
newz
abag
enka
epet
-mix
rbig
ksan
risp
enim
ymom
igar
edan
nmax
cted
ngry
kusa
soho
emin
amel
kbit
gara
gary
buds
ovka
tseo
rato
ouge
dmod
mboo
pong
svet
rbay
arda
ysis
ysix
erat
eque
kini
gale
lmet
pcar
hage
kash
tsch
owth
dala
gola
bare
edis
edid
etex
ahot
myth
nban
ylet
ylee
owed
wink
t168
osit
lcup
rona
okin
asim
teco
alat
ttic
etim
loth
gado
arme
eour
obay
sgas
iara
kcat
toro
rski
citi
taku
xman
sdom
snyc
tmax
irth
wade
assy
njin
ombo
hich
scap
dedu
ngme
awin
eshi
e247
emar
edue
dino
yton
atop
lini
tarz
pawn
rods
tfar
ebee
shui
twon
icht
hpro
tsad
osen
hief
hier
merz
veit
daze
eird
chee
reka
gton
sley
rtes
lulu
sxxx
hten
kala
ktoo
peer
buro
chix
-con
lied
ndre
ardo
iska
hcan
sesi
oded
suse
eray
asap
axxx
wboy
macy
eedu
eong
nler
s-it
sbit
tsin
kher
inde
kten
orit
isto
dems
mech
weld
natv
ryes
dlow
uito
mium
omat
calc
egod
egon
anim
aida
6789
xist
yban
rnia
maki
ynes
ugou
agos
t360
ihao
fork
foro
zari
a168
dien
ogou
ohub
adis
loyd
xcom
tous
gway
wyou
edir
ymac
gora
tetv
hbuy
rera
inox
enal
ssay
ng4u
hiba
mboy
ogie
tify
cura
mous
lten
alie
insa
yhow
dmad
uple
ipai
omms
nsay
haid
oist
pati
apay
-uae
mesa
eism
yand
orta
arot
dade
dadd
umen
ilee
lags
unce
7788
alai
raja
nfar
gcom
ckin
dney
icai
rath
mir2
edev
okai
rbal
bweb
niac
igon
amat
lmer
bari
y888
ahan
kmix
roto
rede
-sms
ntum
teon
lope
rgen
eece
avon
amet
ozer
leat
ubbs
kawa
nney
uedu
herm
dore
dorf
fweb
idin
pboy
doit
sago
etip
tuna
urka
oson
yeri
avie
tora
pmap
eown
rtwo
doma
sred
ulus
ruse
sano
mala
tato
kred
emat
thos
azza
iddy
audi
pton
veme
lint
beni
2011
cago
rfor
abbo
abbs
otax
otan
osee
spel
y168
neto
xweb
odas
ljoy
ilya
cong
miya
eago
sjoy
rtus
anew
oboy
gigs
xpro
dley
umba
kane
hcat
er4u
raum
chou
lcam
nera
orst
pear
gics
ysum
kast
ltur
rsys
afox
-sol
pwin
aedu
ocio
pday
nlee
genc
modo
ayed
rasi
ouri
aqui
rsum
quid
ekin
ejet
imen
oyes
arre
l168
derz
omag
cals
cali
ubar
tbay
isee
erte
hoop
shka
usco
ybag
s4me
ayak
ayam
tiny
tans
s100
odvd
veda
ogoo
hois
rdin
nwei
jeux
r365
u114
tuck
ocam
inkz
nose
nsor
olon
ansa
alma
eida
stie
stis
iaju
5588
omia
etsu
ixxx
vinc
nche
oros
ngla
bila
yate
wage
5566
boya
ibuy
2001
atra
nvas
ssat
omma
nsas
lobo
akey
unya
oink
uiet
icio
athe
wist
amic
fred
hant
dmet
glog
kali
egra
-spa
acat
plum
cide
lsan
trio
edam
mcom
eses
eseo
teri
lute
aoyu
smax
oley
mira
azul
teka
htop
rike
edas
obra
benz
lcon
dtie
lmed
ebut
-new
lbed
trad
hpop
cain
eche
clik
gcar
ewas
e114
epts
thub
temp
jung
kens
holt
dtoo
ilik
ogix
glaw
rvip
ysay
dhim
poff
tamp
adev
kfit
ywon
tear
sgay
nsum
rler
ylay
rmat
pled
ulsa
emex
nshe
sala
mang
gfan
inxi
clab
kola
witz
rags
mbuy
kabu
t101
zhen
roes
joes
crop
nife
sthe
ikai
thod
osas
-bux
odog
ytoy
inte
nior
umbs
mjob
rsey
raco
e911
bdev
emom
pour
2014
dbig
shup
aybe
ptin
gues
adom
luse
sita
haul
oseo
tink
rsan
uken
obux
ijob
ofis
erex
ncai
mmed
vage
evis
fusa
nwho
opix
isia
oaks
rmor
clup
kani
occo
rini
tham
nbbs
swim
monk
moni
odec
wska
ospa
faqs
vere
vern
sbug
cows
hled
dunn
recs
mise
loon
loot
tmed
heep
heji
tado
ppay
eval
dran
ltwo
g168
anma
n360
asms
ghui
illz
itop
ably
-eye
d123
mags
hita
sari
anke
hese
kjoy
tleg
gmen
stam
ieve
tvip
-cat
xsex
swag
cala
nier
ecko
kwar
ybad
pete
aski
ktax
op24
e888
i888
orer
feld
ynch
roco
orgy
nany
erox
kool
ndai
prus
pyou
ydro
utie
myou
seco
s888
ispa
omit
essy
ogia
darm
elds
tltd
ritz
popo
cero
alit
ereo
lmod
letv
ghes
athy
enki
e-me
hfly
smin
wiss
kaid
nmet
ksat
naim
ebbs
o360
lume
amis
amir
nito
hguy
yani
ckme
naka
dada
anyu
ygen
umes
toad
smed
rhat
rber
andu
1001
cltd
iosk
sera
rens
osse
epos
ckie
blox
zayn
ilor
wift
inix
nend
tsam
ardy
aima
igos
daim
cote
bara
sola
obit
rowd
zweb
ined
mpay
gled
llys
-hot
echa
siam
orns
afly
veys
inca
rstv
mite
pike
terr
hpay
appa
-pic
sick
unia
ammy
ehan
esor
toit
trem
kfor
bust
diff
esim
rgin
rero
soto
idon
rold
nllc
tima
vega
ngao
ngal
ngam
rmad
suck
u100
lvan
pler
opet
uria
phis
yuse
tego
dara
exin
ohot
rsin
lyes
rago
russ
iamo
sani
rher
emac
opan
llup
hian
ntar
ladd
aude
ouwu
npet
nper
xone
tarr
rkut
rews
lany
phon
loca
phot
ebec
hkey
atia
hwar
adon
sito
juan
otas
owar
khit
ttel
shuo
oody
5188
pway
ang8
joho
hika
auna
awap
enos
kept
arak
opie
emir
oltd
ebot
ggin
ilde
kurt
enne
ohit
raku
wone
kand
rbag
rper
edot
nali
aing
efry
eith
dite
rspa
idus
rque
cato
ynot
lget
a-co
rtoo
nant
wbox
vsky
ignz
eroo
fool
xels
ndvd
ibet
mach
afor
dani
ocos
s-co
uset
ybot
mura
azoo
izzy
olat
crab
illc
e101
grab
ktea
aros
maga
uego
dfed
bson
ocon
d100
tipp
ippo
warz
wara
tban
ngde
uris
sown
-san
skar
nlow
nlot
gjob
ysit
rlds
bola
ctus
-eng
ogon
itee
itea
omet
olie
dico
eist
uber
antu
lher
raff
rafx
itat
i168
ilva
lata
dons
tedu
rogs
orch
aloo
wnow
reta
tass
redu
epia
ndee
y520
wair
etus
2003
lsix
ltea
velo
vela
sinn
bina
seon
idai
ifty
vend
nnor
rren
abao
abal
athi
edie
1988
nrow
nmei
neus
imus
kkid
inon
mbit
pfan
rany
snot
r123
esgo
nran
rims
nser
opas
depo
rsay
xnow
duke
inme
hame
itap
teez
iedu
-bio
nmad
pred
eser
momo
kinz
nvia
wool
nels
hset
anbe
gits
onos
igma
zlaw
dise
rras
kvan
pcom
lise
ardi
scon
hili
buff
exec
lbad
buch
csex
uals
anno
prog
dill
dals
aike
gane
kgas
peye
rapp
yadd
ntur
anle
naut
wala
-day
olle
tney
mits
mfan
reto
mada
mads
anju
dela
yleg
s247
knik
evin
roni
esty
okia
kiya
cled
nawa
onyx
ucan
ctic
ewed
-ksa
epit
urin
bitz
nsha
taka
noto
abus
osme
reso
suma
exim
tots
pbid
raga
rizm
aphy
ng88
tlie
d888
ktwo
sint
ikar
dded
tigo
ltda
dity
ncon
ylow
uggy
seng
ulia
vyou
wser
oule
rche
sire
boxx
opon
movs
aier
ealz
bpro
icha
ylie
rmin
mier
iboy
maui
lthy
aito
iway
a-tv
idom
r-it
lact
tola
ngwu
nutz
gtop
upid
kcup
ucar
ytel
tona
idat
iego
raim
oxie
tsaw
umbo
utra
nemy
dden
4989
lwho
mkey
pect
uten
rtan
hber
9000
oden
lyts
hens
ladi
chop
chow
llab
hima
gico
hred
ofox
iona
wigs
urdu
dthe
4net
grew
macs
poil
gher
naps
klot
iera
gami
itme
yold
watt
tite
asee
katz
ekor
pjob
ffle
rmay
atax
yond
ssor
dyne
elhi
nkin
dose
utas
llgo
tyme
e168
nego
7888
oshu
dres
n888
tela
risi
xpat
egot
ertz
mcat
licy
neal
imix
toss
boke
ojoy
-des
xers
ppin
leds
tmp3
osms
butt
lich
dask
hcom
beth
sbag
inyl
ndao
egem
hbug
ydry
mula
artv
enan
mbia
pent
gsky
gday
ujie
nace
naco
akas
auge
s168
ngou
sill
lios
alor
ehui
vant
enga
tuga
uchi
ppop
saas
ymes
oall
e001
pkey
revo
hake
ifix
smos
yarm
apia
naga
bead
ohan
agar
abad
oget
adat
ckon
hany
izmo
nguo
nora
aley
onts
coil
flor
bail
htax
beez
olux
n114
6999
acai
otao
boro
astv
smet
tbux
nmac
andt
rcam
ueno
esea
esen
alah
lmax
klip
bidz
aney
enme
rhub
ltry
cass
bloc
2all
inin
inio
kuma
e-up
enso
olee
avet
lola
esam
akai
ewhy
amac
u888
lmes
bark
kami
erna
mbar
tium
rava
ahal
obio
teit
ooch
tata
duce
itsu
hmix
tram
elts
eted
niel
unks
ngzi
inci
terz
obei
tdie
nuke
owon
tema
anje
lory
esof
itha
a888
holm
winc
tnik
pmen
esti
mend
etik
herd
pita
onya
ouls
alus
chts
edoo
lova
kbed
ngap
ylan
emei
nshu
dwho
gmap
usha
sant
hica
otox
doom
ncil
zlar
vion
hfit
lado
unto
ahit
cman
bcam
spar
ifei
rewe
usty
ursa
hfix
2016
bums
ptik
ylin
vise
rami
adow
adot
dsin
sada
ikey
sads
l888
llia
lesh
ygap
tstv
eena
npan
veis
icat
nfox
ifax
goff
i114
urer
neta
banc
0999
lula
runa
bord
erds
llme
a360
emic
isin
ildo
iors
nyin
chik
y123
uine
kana
uake
rins
thas
ebio
dbad
tmay
nile
odev
biao
edby
peat
peal
yshe
anak
anai
etts
azin
llam
nink
dung
rawl
arka
ssis
onar
rllc
aiji
imar
shad
gbar
ajia
ecos
zler
pura
ndid
-doc
ssos
tabi
epub
mago
atro
cena
hkid
wned
gaia
swan
abra
5858
asit
omad
dlab
egou
egov
oole
bong
ngdo
esha
koko
acne
lody
lero
xton
idor
chum
tano
nska
cane
dwon
hmap
dier
skip
arve
iane
saki
ipts
nkim
a999
msky
-sat
rady
dspa
iali
iall
azen
maro
ymas
ymad
loha
egel
deen
hbus
vila
rron
tyet
nltd
urve
ided
roku
agem
orca
sbed
-key
ruta
tsme
inee
echt
mout
frag
fran
egro
nvan
smod
mati
midi
mday
yend
apit
hter
isty
apod
apop
ruma
eins
seis
1188
iada
hata
rned
hida
lera
daya
rano
geld
saga
4sex
amod
lhow
ghan
indi
pron
esms
onme
nnes
klik
ovin
atts
ixit
ltoo
onan
nxxx
olts
lyst
flaw
okay
rbad
uana
dant
nbed
nnan
sche
unga
sspa
lbay
obig
riel
ojob
fico
gant
aguy
sors
uray
tise
oked
ndup
orna
blin
lops
anup
puts
tbad
apet
eech
refi
erji
leap
aole
sice
sask
yhim
rvin
edme
tren
ilgi
ysad
meno
yvia
lott
lmar
udan
alux
mpty
ancn
edoc
rhan
edoe
tead
melo
zhai
cobs
oped
ucci
abux
lkan
uble
tsup
mane
ceus
ived
ivet
ngor
exia
dour
hequ
wits
mich
gons
uhan
nore
sany
3456
vweb
uffs
pano
mian
ubuy
ucom
arya
ntam
osat
owes
nabi
y101
a114
zika
nlan
tsys
ngke
rach
rchi
opod
thid
tien
l-it
kish
eben
catv
etis
onst
vist
avas
mmix
pmix
fset
bald
ntee
osed
aiti
hien
ilas
kmag
stup
e411
atka
xedu
adix
bane
b123
eful
ratv
aira
hiki
ytec
rung
awar
cfan
toni
cedu
n101
hris
keme
ercs
noid
erca
chio
acos
kuri
uhui
pecs
nale
eous
oque
mona
0888
iola
ilfs
entv
rear
kulu
zluk
wize
e-tv
choi
chor
dvia
arex
arey
vero
nang
ioni
adre
gres
chme
ktie
karm
rawn
juku
tlan
soma
dule
yago
asel
orod
nhac
geng
isal
isar
mguy
dika
itoo
ndin
pfly
n-it
hada
rast
usch
yaim
oong
mtop
imag
goto
teli
sium
sato
crea
kier
fnow
en24
eara
peti
edly
cano
wens
deat
fora
g888
1car
hlet
altd
eapp
stew
saku
swer
ypix
omei
nsin
iggs
goth
mpos
omon
lico
r100
inyu
radd
tdry
mtec
lspa
iltd
lava
ndam
ndal
cjob
cbox
apic
ltan
deem
ytry
slet
gget
diao
jive
itao
noni
o888
ulti
pars
anqi
ncha
afar
zeta
arby
inup
eung
nwon
lard
ndem
urts
deon
s4us
uson
tspa
kpen
cere
cera
srow
alix
hego
iene
eiss
toku
stud
gson
turo
nsam
aute
rres
abas
wout
maza
ts4u
neup
remo
ltin
ksin
pkid
weat
nake
rcup
acas
aben
vive
hams
ogas
yxxx
edad
edal
gdog
onez
tere
nbad
turl
acco
alal
trus
tays
bric
dmit
ixia
hamp
yota
ibya
loji
leto
tepe
w123
syxx
nent
arde
-map
-mac
hton
ujia
lsky
cchi
niao
rify
amad
amai
kamp
whow
pree
baru
ivre
mlog
thru
oare
ewow
enup
lkin
reda
guro
itsa
edix
ixon
ipop
echi
ojia
yraw
yran
eltd
ngge
6000
onew
ikat
hnew
wald
mika
arsh
arsi
sdev
leas
obed
teme
tdvd
atee
-ken
hols
exco
wana
ilio
idio
vary
ntil
owme
mena
yvip
ceit
lmad
dahl
daho
hery
rgie
jams
yres
ycon
alud
icor
liya
ahin
lbig
epin
fbox
akis
ooka
ooke
gnal
itur
eski
ylab
aica
rmac
ilux
awks
ssas
dltd
phia
phit
roba
otty
iset
noor
ushe
pmod
1114
rxxx
gjia
nori
sans
sane
hice
hico
mfit
goss
etos
pand
rlay
odie
nomi
cron
reup
seli
kadd
bozi
ikal
rofi
unta
kbig
-bus
eant
ptor
odos
imp3
tard
mjoy
ulis
rsed
opol
pigs
veon
ytin
suki
xart
tapp
oxer
nere
sadd
lest
merc
zica
vein
ttec
cian
wore
hgas
bani
obug
tiao
angi
eces
yons
hike
ebao
ictv
-123
olfe
rbor
remi
ngua
pubs
smar
ktor
josh
mayo
araj
neva
bora
peel
fizz
anem
ilds
bold
ybux
kerz
drom
yeat
carz
remy
ecal
dbag
eadz
sker
inji
yown
kget
nlay
chos
hira
orsa
anar
anad
swon
8588
kimi
loma
ninn
rude
mktg
urda
raws
ibei
maca
ikom
tbot
scle
iero
gama
acho
gmag
asea
arif
olay
-bbs
tcap
tomy
rido
an88
amtv
naro
idit
ghua
unde
mset
l365
talo
indu
rasa
itra
tyre
arol
g100
roit
aocn
anki
ondi
tley
n-tv
umax
foil
cios
entr
ipps
bdsm
nkel
roon
olen
isec
shki
gboy
koku
amus
rbux
stok
stof
enza
cuit
kask
ihai
beri
medu
ianz
peri
l123
-gay
tdns
tila
-inn
-ind
hbit
itgo
marx
acro
iale
gsex
apin
anso
liam
wbar
alme
gana
itam
sgym
ipos
eatz
doni
-gen
akar
eyin
mein
vins
nchu
actu
vani
nxia
ucha
semi
inez
yant
yany
mlab
oora
ovac
ngju
klar
pcut
enen
raba
z123
ocup
alka
abat
ogel
rale
glia
cpro
gwin
bung
eise
erka
onta
ldoc
snob
aisy
nsel
tsms
nkus
msat
w168
rran
umei
fury
ogar
edao
edat
baka
kama
-men
levy
apex
teru
turi
cter
ngto
sere
uinn
swho
galo
suri
inko
inke
tros
dtry
-pal
ipic
a-bg
yrow
-tax
ofly
umas
hars
abid
hifi
ouga
smay
smad
vior
glan
glab
pont
tcry
riki
scos
etag
uron
dany
amal
uery
2che
mbai
mbat
styl
mred
geme
kthe
rien
eggs
pago
-guy
lldo
d365
ptoo
hevy
zier
onco
teto
unez
obet
redi
icel
dllc
gure
glee
acup
aoba
ndue
rige
3888
ewan
rthe
coll
lski
arse
obel
rara
owoo
arna
june
dhow
sasi
otry
-war
wand
ilic
omtv
acme
okid
vail
unow
renz
kidd
rtas
osoo
nken
dery
enyc
ollc
pion
apts
8866
okon
-pub
teak
nsus
etka
lmp3
finn
zled
ngay
ngad
atam
dova
pune
2get
afit
elar
tdot
tdoc
xmas
owit
nual
ibro
twit
kcom
rgue
etme
1job
ptax
ulum
uluo
ligo
assi
yoto
rdvd
mfix
odis
noma
avez
ilou
sbbq
gtea
urum
opat
xlaw
lett
owei
wcar
ieux
eana
odor
odom
lanc
lyan
rsen
raci
ugen
thia
mute
bump
yink
4000
atit
aval
abby
ajes
sado
ppro
lesa
otam
eent
cunt
ndat
plow
logg
rted
airo
ofin
aldi
inny
inna
inno
jaya
tony
mmel
ngue
bbox
bito
ikis
isio
mole
oboo
humb
huma
ivip
erix
neme
chid
yzen
erro
dron
cari
isko
dban
kmod
elop
odex
bias
ssus
rdes
reau
dito
roos
n-co
itti
wold
leus
dsaw
hird
peas
hurt
hila
emus
ecca
anah
ljet
hime
ettv
nino
-365
stgo
reck
2114
maco
gnew
dama
elsa
imat
tlay
psee
shao
ouds
lika
gile
eons
rleg
kata
quet
isat
pjoy
narm
elus
undi
ssol
-log
nees
l360
ingh
gran
avin
orio
orid
y999
utai
tpal
atil
gair
itie
stag
swat
vase
osha
omac
warp
akim
satv
sate
anik
ised
tfed
2888
anut
keat
stoy
rkan
gwei
idoo
chup
c365
agov
egay
uage
gfly
sifu
ribs
subs
dfar
aiba
shis
mltd
skim
itei
ssie
adda
wain
ntme
deus
ufan
ajax
wert
awns
erpa
dasi
anex
hbid
enci
gaga
ocai
pegs
0123
grim
olot
e4me
utin
shme
usia
vetv
enat
asto
e-co
stia
stir
eaty
edra
ranz
omie
ubin
luke
ncho
4444
koch
ldit
ndev
t-tv
6114
lenz
yeur
veld
boda
oval
ohoo
lami
enee
tput
tsol
irez
sace
idad
wpay
brat
rrer
pbus
eame
laka
twhy
coke
op10
omod
s-jp
dodo
ulos
meso
a-uk
imum
ksit
seva
aleo
vado
icki
mbie
t888
fern
toob
ldon
eile
rere
dmed
ulla
hsat
nray
nraw
gyan
h365
onka
umer
ghai
ghar
wway
pore
d-it
bake
orkz
kpro
-srl
yric
einv
h168
nima
ssel
bats
nned
itro
alak
rltd
ghot
mbet
itum
ocho
comb
ipit
ckis
leta
rosh
hini
uela
ezoo
sony
icad
wtea
k-it
dgap
mire
lnot
yedu
tsan
scop
a001
mbos
etan
hllc
amay
uist
ing8
-toy
mply
abot
ahar
akti
lfer
kasa
anni
fica
esca
ionz
nask
enut
reds
gere
erle
rarm
kyes
reos
twas
ojie
ds4u
eltv
klow
one1
onee
l411
ng24
ygod
arsa
ubby
rari
8008
ywhy
ntao
ls4u
bild
rlie
nged
kido
eory
unis
lvet
eeka
inat
osin
whip
omba
ombe
ehas
hmed
stdo
elma
s-us
yhut
hiya
goya
apri
kaye
kiye
iatv
evet
bled
naya
gads
adet
2own
kour
tami
jana
baza
obad
nmp3
hpen
rham
orde
s114
i520
kcan
rdue
atak
knit
cits
avox
hmag
otie
wusa
owie
oage
-aid
ngol
uder
uden
exis
yaya
ngku
ugan
nord
hash
pico
pica
t4me
pans
pang
pani
odin
ttos
atme
sbbs
sela
alsh
plat
ehis
loja
lasa
otme
osay
moth
nkle
ohat
ndou
limb
atoz
cham
seno
izhi
eiki
ibit
oula
rode
lems
cata
yinn
gaya
enex
lali
yasu
endz
dify
dest
uyan
tbio
cias
heds
ssam
dlay
hora
coes
rsal
rsad
ryon
e-jp
adir
adim
nete
omp3
veto
tcon
dogg
ebat
opos
oems
aunt
odai
inns
bran
baum
eass
evie
drip
oyna
ldus
dsea
cedo
tbut
otem
icol
emie
dfox
irma
o-op
inle
erci
liva
erdo
voce
ulan
ulab
carl
thao
slap
-gps
leur
leup
tmac
rdev
y100
ynor
llby
msin
aila
avto
dhub
shco
diot
7000
nind
-360
igne
heli
basi
mish
ayin
kard
6868
tbol
ulaw
jury
ooby
isou
jeff
shas
wdog
incy
stex
tada
reem
oduo
oste
nlei
ddha
orow
cism
nhai
-sim
llee
hoca
owup
-nyc
msee
tala
rask
mhit
arou
tena
hitz
oric
orie
sarm
erns
ooni
ecim
nati
nato
oyer
moff
gaid
itia
ebin
erik
ilus
ppet
ngye
gfit
zpro
ergi
rwow
sata
ihoo
-sec
-sea
luca
hoon
digo
tzer
ieta
oxxx
saim
idoc
tinh
ermo
chus
lpad
ugle
cang
agoo
esuk
deai
eken
ckel
vedo
imia
fore
shia
r168
diem
skis
skit
itez
ohui
ohua
ppie
edna
psys
gros
jain
fyou
jock
olit
ajan
bbin
owho
ipay
sbad
shok
dics
inya
acre
-cms
ilts
t114
adus
ifes
bano
cboy
nsol
nbux
ymax
orat
util
ohns
tytv
kley
tial
hnik
tpig
towe
wday
ecer
ydin
ydie
akal
syon
aget
sila
meit
vini
inuo
tasi
mavi
iele
nost
rkin
epig
kiri
tote
vell
lhat
ovat
alip
suji
pcup
haka
rabi
ysam
saco
ckam
idam
mico
nsad
veno
k100
uton
atha
liki
tomi
1983
lynk
oggy
dura
dnot
egps
rbin
dern
omom
pats
lere
apal
ipix
tgps
mler
moil
cair
kees
ryit
k123
rtys
enio
ojin
ldom
udos
kaka
aani
evue
nsec
ortz
orty
orto
olum
olus
t411
igas
0web
ilen
rela
vivo
rrat
tpic
nona
edar
ophy
svia
cant
judo
irli
apen
nbag
keno
hula
hull
oidc
sert
dius
kken
kusu
nela
aneh
coma
pena
ween
blok
gyms
unan
amco
bway
abis
lisi
0755
atry
-bet
ardz
c-uk
ysin
scot
rseo
dink
epat
dans
kink
dane
amam
akay
uero
azhe
icca
tnor
whom
1860
mypc
rhim
emps
meda
rmon
etco
andz
vica
vico
nasa
jima
sora
osur
rves
tace
omps
ntup
-bag
egle
m123
tbbs
rfar
rzen
onel
xray
ulas
dpad
uces
alys
ilog
a101
epth
acka
wton
wred
oski
leak
rwho
obes
a411
dele
heri
dorm
doro
lois
ogps
ithe
gpay
wani
umin
rsol
ilit
-off
vare
osia
rvia
s-up
w365
ioso
arls
eezy
hdvd
dren
liff
spix
onyc
davi
ouis
kmet
gada
ndby
zuki
wful
rcpa
jani
elou
sohu
lees
pnot
ancs
te4u
ywow
jerk
akit
sgap
mell
ixen
nfei
fake
rlee
rles
ysys
ylas
0898
n911
atay
gtax
plea
webz
uayu
taki
sali
st4u
zzer
tsus
npix
rfin
resi
lsum
awon
atco
clam
ohow
ewit
rsia
pbit
mick
yogi
nors
raza
dodd
scal
bogo
eetv
nzen
bows
rlar
zure
wfly
kobe
ntix
plab
nify
rais
golo
opad
belo
8123
ntag
edus
tiga
educ
owel
elex
8688
pale
anpi
s777
linn
chau
codi
yaku
wipe
udia
pthe
iati
bend
fitz
uver
mova
nade
mets
meto
huck
psit
psix
shus
ivos
hjoy
rfox
twow
icho
rmit
mmie
nois
avar
abba
nisa
rsco
yasi
puff
uhou
adoc
hfor
ouni
ripe
balt
piel
idou
mery
eenz
yuki
uevo
awgs
abio
chew
mped
eodd
rsam
enji
gedu
uker
nute
adie
n520
logz
dogz
eela
eele
pltd
odao
""".split()
for index, name in enumerate(POPULAR_SUFFIXES[4]):
    SUFFIX_SCORES[name] = (8000 - index) / 12000.0
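# Editorial note (assumption-labeled, not from the original module): the
# 4-character bucket uses a different normalization than the 3-character
# one, so its top-ranked suffix ('shop', first in the list above) scores
# (8000 - 0) / 12000.0 ~= 0.667 rather than 0.5; scores are therefore
# directly comparable within a length bucket, not necessarily across buckets.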
POPULAR_SUFFIXES[5] = """
group
nline
media
world
music
store
esign
forum
china
games
house
space
today
style
india
video
radio
trade
ravel
hotel
guide
photo
homes
money
books
power
phone
poker
place
sales
ation
press
works
green
tudio
ports
signs
enter
sport
share
earch
lobal
print
light
movie
stone
links
cards
deals
watch
obile
board
gifts
stuff
point
-shop
party
stock
ealth
sites
mania
pages
girls
water
sting
magic
dream
-tech
email
films
times
image
korea
twork
tours
irect
eshop
drive
arket
logic
loans
tools
clean
nergy
match
ision
ology
eview
files
aming
chool
parts
sound
metal
trust
model
night
ealty
index
daily
track
state
japan
notes
ystem
flash
event
right
vents
pedia
voice
amily
story
first
glass
ebook
force
click
ville
ideas
study
brand
guard
names
train
sense
guild
price
tions
shops
table
speed
aster
start
serve
ortal
happy
class
inder
ating
white
legal
woman
lines
smart
alive
ffice
audio
touch
paper
erver
eport
apple
shoes
women
river
acing
local
hotos
esoft
order
score
blogs
thing
eland
words
-info
goods
porno
-club
solar
heart
emlak
scene
field
earth
ource
rance
facts
cycle
truck
stars
topia
fresh
dance
smile
black
build
ental
block
crazy
cheap
brown
eclub
codes
value
leads
omain
ovies
otech
buddy
sland
gency
ehome
lanet
think
sshop
funds
enews
orums
teens
cover
trans
ideos
redit
eauty
quest
staff
stand
asoft
quote
ashop
shell
views
shows
ction
stick
sight
stage
nvest
check
learn
count
itech
dates
atech
elife
sters
small
zilla
eblog
union
ading
horse
trace
urope
nshop
esite
scape
etech
-news
rvice
4life
craft
young
rates
speak
stems
lists
ehost
asino
there
inter
osoft
otels
oshop
doors
-life
elive
fight
prime
truth
frame
angel
oogle
offer
plant
ering
otors
plans
ishop
-soft
seven
pital
south
range
einfo
tnews
daddy
mages
again
ranch
court
forms
boxes
tones
rooms
foods
overs
lands
peace
smith
elove
clock
iving
slist
proxy
anada
round
eline
coach
tshop
-clan
ecity
stech
elite
front
yshop
agent
aware
ights
nding
marks
lover
thome
depot
cross
sclub
ready
floor
warez
tclub
north
scale
ntech
forex
heads
alert
alarm
chair
hurch
graph
xpert
shack
thost
gamer
udios
plaza
etime
shion
bytes
ecard
anews
spain
clear
cloud
salon
lower
extra
tsoft
-mail
label
unter
miles
wheel
write
dshop
elist
nsoft
-zone
acker
egame
oject
-team
chain
break
rocks
rules
odels
nking
rader
sblog
eight
ahost
color
worth
otion
jones
talks
clips
quick
eplus
eople
steel
route
ecare
elink
pizza
squad
occer
eking
snews
etalk
scope
album
aobao
epage
epark
tlife
child
locks
drink
sheet
sharp
layer
-host
-love
autos
total
octor
stats
sugar
elect
esale
shome
ridge
nclub
hange
reach
proof
ccess
great
nance
feeds
slife
nnews
awyer
ejobs
-home
focus
matic
tmail
espot
picks
efree
mpany
ecure
talia
eless
oland
erman
eplay
treet
admin
sleep
eroom
level
motor
etown
hands
alife
aclub
loads
where
eshow
babes
eball
youth
tness
patch
plane
eteam
spots
dlife
trick
ining
faith
egirl
oblog
texas
rshop
berry
calls
tunes
edata
ntral
nting
rasil
about
minds
storm
arden
title
ernet
skill
ebank
shape
glish
sinfo
haber
tsite
basic
emark
eting
eband
human
ocean
teach
dhost
-auto
tinfo
ysoft
ylife
tlist
ablog
ideal
tline
iness
shirt
ewire
oster
maker
ender
yclub
shine
nsite
shift
paint
wards
slive
epair
noise
chips
found
beach
eries
banks
tcard
three
nlove
eller
under
pride
ewear
plast
pport
cares
moves
owers
efood
esort
sells
erica
inger
ylove
upply
lease
ehelp
vegas
miami
ecall
epost
spark
lshop
-corp
ewell
tland
-game
eblue
ality
enjoy
cords
taste
tures
alley
erate
oryou
llery
eplan
arena
ecode
tbook
smail
esafe
-city
uture
offee
adult
award
steam
eader
plays
nnect
tspot
reams
nblog
yhome
ssite
trial
linux
vista
lunch
emore
tlive
kings
4ever
pping
tjobs
mains
nited
drops
tream
etter
-blog
gital
ature
rtech
cases
vital
nders
funny
haven
efire
ahead
juice
linic
limit
yhost
solve
eloan
ework
tball
efile
etest
promo
eface
alone
splus
iller
phere
erson
topic
ehere
death
edate
ewise
ounds
bites
dding
ynews
llege
-plus
dress
isoft
amers
ncity
ebase
nhost
tware
tplus
ounge
catch
pharm
posts
walls
needs
lance
candy
tings
efish
tlove
-free
bound
pixel
tcode
visit
ygirl
dubai
efarm
nhome
brain
urvey
entry
eform
esell
acorp
ebest
hello
anime
lucky
ethis
pills
jeans
close
grant
eways
tcity
lotto
tpark
trash
tbank
eeasy
ckers
stalk
angle
sbook
taway
laser
lives
ewall
piece
olive
bears
bingo
tring
efund
yland
finds
users
tplan
ecafe
licks
owner
scorp
items
yblog
epass
tests
ecell
handy
scrap
edeal
woool
farms
emind
entre
ister
usion
tores
tlink
chaos
orner
actor
ename
onics
diary
cable
ninfo
etree
trees
bucks
inner
ctory
claim
omail
queen
efilm
ather
other
robot
omics
vault
grand
etail
rlife
ouses
egood
estop
epack
ement
4free
s4you
shoot
ennis
ecorp
eaway
ewine
-link
holic
tpost
waste
rland
sfree
ewind
heels
edown
eside
rsoft
inema
ahome
hones
iblog
villa
kshop
iends
clubs
ecore
nmail
short
egypt
digit
going
pload
lecom
-site
thelp
ewest
bsite
ssage
shost
oclub
shing
rofit
tcare
uning
teeth
roker
dnews
elady
apart
eback
tface
fancy
ocial
dhome
njobs
tfree
ntime
twell
andco
ezero
tgame
ymail
sband
efast
oking
novel
enote
cheng
tgirl
bella
strip
ademy
eload
frica
efind
elook
crash
ezone
dclub
mates
ehead
tdata
ssion
welry
rclub
ntown
nside
harma
verse
asite
ester
suits
tblog
cause
troom
sjobs
astic
spage
emine
exico
shots
trend
techs
ehair
arter
shelf
etrip
rblog
utlet
ircle
tsale
boats
rmail
itter
blast
nsult
reads
epool
suite
eague
joint
tteam
ondon
teasy
sucks
micro
lland
riend
trips
tfilm
paris
rhost
epart
twear
scafe
ution
dical
along
rcity
ttalk
tfast
saver
tstop
decor
alove
mtech
bikes
eidea
ebits
treat
built
chart
elock
ehunt
ajans
large
acity
tpage
tbase
ckets
looks
4sale
tutor
chine
efeed
ylist
onews
pussy
roups
tdoor
opper
twise
escan
medya
rnews
erlaw
vices
pilot
arine
brief
tband
rapid
urkey
freak
dvice
enice
ocity
shall
dlive
ohost
nfree
azine
acafe
swell
aland
ehand
rrent
elong
grind
march
mshop
rever
ttime
tshow
rline
oline
enext
wines
rteam
hirts
areer
eaven
ables
nmore
unite
twire
hosts
nlong
ncorp
mouth
iclub
tasks
pound
holes
etext
llage
sbank
cargo
sgame
esave
ncome
tdeal
edoor
meter
spell
dsoft
witch
arage
scity
kills
ebody
nails
nsale
tdown
tking
troll
lsoft
88888
ancer
error
coder
ghost
ltech
sbest
treme
comic
rning
ehill
atrix
still
estar
fruit
onkey
ttech
sheng
emall
dinfo
aplus
tbest
tcall
olife
tloan
efine
erise
condo
tview
woods
ohome
rings
tmark
grace
bhost
-bank
awaii
doing
ition
nland
orama
verde
efive
stown
honey
cking
tfarm
sfood
tfind
major
reply
tlook
etype
nsure
scare
fever
-navi
ncard
lking
ffers
ntals
ngine
nplay
sonic
older
iland
rhome
ereal
nlife
ytech
amail
aking
panel
eride
tters
abook
eword
slove
ecast
ksoft
tique
tocks
unity
cakes
nbook
ching
4less
falls
polis
thead
emode
tpass
split
-land
amall
tooth
toner
tname
tsign
ampus
forge
kmail
eturn
rsite
folks
slink
4kids
yinfo
italy
rider
cript
xport
tease
-star
otube
geeks
eroad
reyes
inews
icket
-mall
ebear
ygame
shang
chile
tting
being
ytalk
annel
tback
linfo
sgirl
icity
eroll
splay
phase
quare
szone
rsale
issue
eable
ylive
kitty
eonly
sview
ewide
erweb
maroc
epath
onweb
rends
judge
nsaat
ohbet
ecash
etune
ainfo
eover
pulse
relax
ngame
ndate
obama
etube
atour
rking
hobby
eflow
huang
tmore
ilder
mobil
medic
eater
globe
rinfo
esure
ysite
bible
pring
nfire
nteam
beads
ethat
eagle
tload
uster
dtech
ssoft
islam
slots
ecase
nbest
-porn
anner
terra
hours
iwang
ybook
tless
dings
rlive
erush
olink
lmail
nlive
month
mpire
elike
hings
eread
dmail
rides
ttery
shark
raise
parks
ander
ogram
tcell
troad
dbook
tflow
stops
emove
rafts
tiger
efull
epick
ogger
tplay
allas
dbank
owing
aders
stube
dcard
ities
ctive
xiang
npark
ience
tsave
buyer
years
tport
twalk
ebets
lhost
nplus
nshow
ehigh
tates
-live
erock
splan
sweet
tpack
eloop
shelp
ycity
lings
lders
epipe
allen
tdate
mundo
tsafe
lnews
llion
votes
aline
erace
bride
ewish
ctech
lding
pools
ewalk
efour
lclub
beats
tform
wings
edrop
eklam
nlink
dland
oints
cents
trail
ocket
nbank
itnow
yline
oodle
songs
blank
knews
-line
ybaby
azone
ebars
tarea
rents
manga
ttest
ajobs
ehits
racks
nture
oring
tcore
lblog
moore
forme
anger
amera
tlady
evice
hacks
ilife
ebyte
ngels
etell
folio
emade
esnet
thunt
tblue
ytime
token
faces
ncept
astar
iance
motel
tsell
ayday
-asia
epoll
kinfo
nable
pdate
hoice
dspot
tfeed
-cafe
slate
ogame
leman
sesli
enine
egone
eluck
mlife
ttree
ehard
nasty
mails
james
yfish
ozone
trate
khost
-king
erain
aging
ntest
sline
mport
llife
tokyo
-tube
ealer
ngers
solid
nfood
cious
lling
every
venue
esome
ecopy
lodge
later
dgets
dwire
tower
tribe
knows
rbank
tlock
mclub
tfood
elift
dcore
dfish
dsale
twins
tfund
alker
etask
abank
ebuys
using
-test
dpost
ssist
dlist
ysell
nload
final
erart
lumni
sking
erinc
shair
urnal
habit
ewhat
scout
dtown
ncall
dmark
-cash
imail
akers
dcity
icker
efill
-book
sroom
pirit
earea
tapes
klife
oving
ycard
baidu
utube
etour
genie
ylink
helps
ndata
guess
saint
eslaw
virus
whole
rvers
ihost
dline
sides
marry
ragon
nmark
davis
eship
scort
lteam
oards
tzone
alink
tfair
esize
-girl
ntalk
ewill
dgame
treal
tmode
skins
efirm
dshow
twall
gross
thill
attle
eunit
ebids
rbook
npost
cmail
tfile
chick
baker
nlist
nblue
nspot
ngirl
drugs
yfood
tanks
stime
ywork
ments
units
edesk
sonly
dteam
boost
apply
acher
dloan
yshow
plate
etics
dtalk
ehack
muzik
ttrip
ernow
echip
urity
sfast
edone
ility
tsure
ihome
nroom
dwell
tscan
etank
ncode
afood
ejump
alist
erest
ative
ebaby
dview
alent
takes
lbook
ngxin
afety
boots
chang
dwear
s-inc
kland
ypark
andia
wyers
elead
ehole
evote
yours
eporn
lhome
plist
sword
orest
eedge
msoft
ilove
altor
mafia
ecret
ockey
alace
yjobs
nball
oasis
tbill
tside
earmy
rtown
gains
entos
efair
ttext
owell
ytown
sdata
dsite
azaar
nwork
ustom
esend
ofile
dplus
ltalk
attoo
dmore
-talk
yking
tnote
devil
input
dlink
ndeal
ytube
scard
teria
rints
erbox
4host
emake
yview
float
erisk
lcorp
ewels
estep
nlock
youxi
ttown
stour
tdrop
never
mnews
pnews
agame
dpark
rwear
tedge
eedit
efoot
ababy
itall
spend
nwest
shock
tpath
dable
ledge
aring
asing
tpool
jewel
shere
musik
tonly
bliss
dband
logos
dless
umber
tgage
dirty
tight
sgood
twest
ydate
psoft
movil
thair
dname
ocash
imple
twine
balls
tword
arker
after
rtalk
ncare
hills
dgirl
mcorp
maine
kbook
rcent
turbo
broke
anlaw
ninja
tfire
-gmbh
tties
e-web
rlove
pster
sushi
menow
llove
eopen
tonic
carry
okers
rugby
edits
-tour
kclub
yteam
sloan
sions
ocare
dcode
lsite
-jobs
egain
ybank
dking
entor
metro
sauto
icorp
puter
djobs
mhost
tloop
mblog
nager
droid
walks
ntour
rcare
emeet
tcorp
ecost
-kids
twide
dback
begin
esnow
nster
loose
dfilm
dfast
ywear
anova
stein
heats
iyuan
bitch
visor
adies
tidea
ummit
oinfo
grade
lgame
nwell
achat
ingle
thall
ekeys
ycash
obank
nstar
drama
ejunk
opark
dplay
tship
dblog
tcost
nwire
yloan
tdesk
oshow
swing
emass
tmind
spost
-mart
phost
yball
nzone
flies
deasy
obook
-chat
droom
atube
entai
nique
types
bling
nhelp
tcase
rfree
osite
ewith
efact
dcorp
rally
ppers
efeet
atalk
theat
lcard
yhill
waves
rless
racer
ctors
elots
valve
kmark
liner
pause
xshop
dring
ronic
omart
terms
tlike
ytree
ddata
ybest
tfive
skate
hools
rzone
nview
bonus
ewild
nwang
etrue
esart
etape
maria
topen
markt
bazar
pshop
dwork
daway
backs
dbase
hound
rface
dfree
nhair
ecuts
tlead
talog
reate
btech
ssale
socks
alaxy
asset
ocker
urner
dlove
bands
rlist
usoft
aylor
tboat
eease
tfish
tride
rfilm
tyles
ycafe
ystar
echat
nners
yspot
fotos
rlink
istan
alice
eplug
ycare
edays
aprop
rship
refer
cream
tales
epile
ydata
nfish
toons
tzero
scent
emost
rcard
audit
tmove
above
eeyes
eboot
lewis
-time
nkeys
eboat
tbody
emart
dhead
spray
eauto
dfind
elief
csoft
eters
wills
arnet
onder
astro
edeep
tches
iling
marie
wears
belle
ybear
pclub
sfarm
dfund
nface
dface
safer
hints
acard
ewash
tical
ddict
uxury
uwang
nband
oston
trush
yrics
ilver
yplay
etnam
ljobs
thers
rgame
kline
ustin
kcity
ksite
ylady
dtime
utter
piano
gmail
ebill
ushop
cshop
prize
tcash
ypass
ehope
yplus
dlock
queue
dstop
egold
illas
ginfo
quity
ookie
corps
scott
pairs
itour
affic
nters
eware
ogirl
tfull
ndoor
egear
lerts
dpage
tlift
akeup
tways
early
fence
ypage
kblog
lfish
ciety
clark
rplus
2shop
tpoll
etake
-list
eties
weeks
rcode
coast
eleaf
nitor
ntree
-town
thand
nedge
dhere
npack
ycles
ervis
epeak
gator
jumps
yfilm
lline
orall
smark
dfood
strap
ktalk
earts
ddies
etrap
escue
dsign
ourse
ntext
yband
blues
imate
tbuys
karma
abuse
gclub
-crew
tpipe
ysale
tires
puppy
dwest
ishes
dsafe
mpact
srael
rucks
eself
kelly
wants
oteam
untry
ntong
rabia
e4you
chong
emain
elets
nmall
ooper
coins
renew
egrow
yname
ifeng
thole
dcare
nwise
aling
aptop
ddate
rties
kpost
traps
artin
plain
ebite
egion
ipper
nplan
nfilm
olove
royal
alnet
gland
nhead
dware
dwide
amond
edraw
spice
rplan
glove
ption
epain
euser
scoop
yhelp
tpart
nsave
nauto
ybody
esoon
sedge
yfire
naked
ivers
dolls
rport
ghome
ycall
anama
yfree
ostar
ncell
nfast
ycell
xhost
innet
beast
hoppe
gsite
asian
tbids
agirl
vance
kfree
arate
ldata
dsell
ofing
tsize
nning
otour
tcure
arson
twill
rtime
rcorp
seeds
igest
erboy
saves
edisk
iting
smoke
coinc
uides
eekly
rtner
adget
paces
upper
lplus
yface
nlady
dpack
throw
ejoke
-cars
grill
mited
ktech
iners
dfile
10000
amart
plots
ewang
otica
erbar
nlook
rdown
nhill
stroy
savvy
ndone
npage
disco
yfarm
gsoft
impex
tcopy
rfood
anson
ondos
eflat
uitar
cture
tover
dfire
assoc
ydeal
s4all
unkie
a-web
ekind
rwire
rades
epaid
ammer
pzone
inart
gamez
psite
rspot
ymind
nsion
amuse
dlook
ywood
sdeal
ntact
rshow
rtube
klink
udent
ayoga
spool
olist
gshop
below
flyer
alias
onica
mason
ercar
nroad
asket
trong
dcall
renti
egolf
efeel
eknow
kbank
costs
eneed
rways
flirt
opoly
tnext
atino
yhair
roots
lbank
darts
awang
erday
dbest
lmark
dmind
ezine
-tips
agold
nport
rhelp
lpost
nform
ident
ykids
lcafe
means
birds
gging
tbear
estay
-card
trunk
ebugs
ocafe
tfirm
tered
ornot
hines
based
tread
dtree
sgear
ehold
avels
lmore
reaks
dword
lopez
ptrip
gates
adams
bloom
ttype
ywall
ndows
razil
blaze
kgame
tcafe
-gold
ricks
eness
rtree
sclan
hsoft
ojobs
nexus
sshow
dhelp
nsell
eitem
iklan
ehour
alter
ranet
anweb
matik
egacy
ateam
roman
izard
dtube
urses
rpack
tgood
iders
ltime
orman
reers
wells
rtist
tstep
steps
utech
ypost
ktown
allet
rator
twith
nhand
eones
annet
yroom
wfirm
pearl
sauce
ector
ltown
orage
tpick
ordan
lisim
tlong
eeper
bring
taxes
rpark
thard
ngood
rauto
arget
indir
ywire
ijing
pinfo
bread
nstop
forth
rated
10086
steal
stake
lider
zhang
tyour
klive
nbody
tchip
dwise
ycode
tkeys
tunit
ersex
toyou
kteam
mland
olice
rands
liday
ewins
nloan
guang
inese
tthat
kicks
lfree
rotic
vodka
enius
gents
silly
stest
mouse
lebox
-show
swork
klove
drain
rbase
aiwan
lando
oplus
rcade
nname
ndown
msite
aters
gurus
neasy
dplan
ilson
asses
klist
ooter
cyber
dcast
dloop
swest
kable
dtext
ogolf
ories
ooker
inlaw
erlin
e2008
owski
dfarm
charm
yzone
lette
lfire
mbook
ttube
eleft
dpass
rbear
spoke
blade
tnine
-data
llars
divas
grove
honda
kpack
ecome
elebs
dscan
abase
llies
elper
edout
ddoor
ywest
rover
tfour
ussia
arble
unner
tgain
infos
awake
nhope
-sale
twind
makes
nties
jesus
twash
dgood
atest
ytrip
edisc
itube
ehall
loves
thold
yroad
rkets
llink
e-net
hoops
rpost
kenya
yblue
ddeal
elson
sfilm
dtrip
nhere
enver
ronix
ntrol
rdate
imall
nfair
o-web
yfund
meets
nanet
ennet
onomy
skids
rtour
rious
dzone
esets
ncore
pgame
stree
atlas
ocard
edark
ltrip
teams
ackup
eterm
tmeet
usica
tfine
drate
etone
twice
racle
ycorp
elogs
hares
trisk
ayers
tsend
oplay
lates
tmine
onlaw
nidea
crush
dnote
ichan
remix
onusa
kface
ntage
abbit
lture
oding
arama
didea
theme
exact
lcode
leave
nsoon
sname
ening
dcell
pline
celeb
apers
ookup
narea
mbers
cinfo
neyes
uotes
oping
eyour
agolf
onnet
lcity
seasy
dster
urism
tstar
renet
ngren
ucker
nator
iking
ronet
itude
brary
ratis
might
-comm
ckson
llers
yless
ultra
anlar
menus
tenet
nbits
otics
arang
ktube
libre
broad
tsnow
lwear
nware
khome
yfile
owang
girlz
nfund
tiles
zones
lplan
houji
rview
lgirl
oupon
lspot
bshop
estor
nbase
tloss
rable
lions
basis
kwear
towin
ibank
perts
rloan
uegos
parel
dship
facil
tbits
ngman
seats
prise
dhand
tflat
flame
yaway
lever
cnews
teway
orter
guyen
glife
sence
perry
gblog
mwear
darea
dport
crane
ncafe
esuit
efail
rtual
s-web
oting
nwear
wimax
llout
tjump
wrong
eonce
dball
borne
tcast
-play
rmind
ynote
lstop
chman
rehab
eheat
sable
eling
intel
dflow
rants
arock
rmark
oices
dtest
mteam
lmart
tdisk
hnews
phome
tever
dents
kfind
lview
jects
alite
omall
ecipe
idget
login
likes
crack
oporn
alabs
tails
rgirl
rwall
panda
xclub
ishow
rring
eward
i-net
etish
blink
efall
tluck
sfire
nmade
dread
nless
alpha
lpool
eseen
ditor
epure
brook
nfind
odian
dcure
swire
ified
elegs
nudes
eraid
minfo
tedit
sluts
ellas
ashow
troot
ethem
aloan
super
infor
erway
dpath
echno
apper
tporn
otalk
rmall
abies
arcia
perty
ellis
ttack
rname
hshop
rfish
akids
trise
arian
kwork
lhead
hlife
-film
knock
eever
mcity
hopes
ngold
yhall
eeder
ngone
areas
schat
caddy
tbyte
isite
-golf
tplug
kcare
smore
ycore
trest
utton
nthis
ncase
kking
ecker
senet
eaver
atown
lpark
dnext
scode
ganic
ibook
amber
kroom
ewing
pteam
odata
grass
ydays
nepal
tural
tarmy
rails
dudes
loset
ymade
mommy
s2008
s2000
delta
tchat
hbook
yboys
sfund
riter
-chan
arpet
yauto
swear
lones
gnews
nwine
gtech
nsafe
tango
nyuan
gteam
istar
istas
creen
aries
-immo
kster
-room
ntell
yhead
nhall
thope
-sexy
romos
chers
ntrip
mondo
spare
eninc
lcare
ldeal
ising
dsend
nurse
pland
lanka
ortho
erbug
aperu
twild
lauto
kstop
itime
bsoft
nways
delay
droad
4rent
acres
obaby
ollar
dhunt
roads
emate
zheng
dstep
lable
yride
-loan
bunny
ucket
eryou
etoys
tmade
orate
enone
turns
deyes
iteam
esweb
4arab
tnice
pider
indow
tdays
icons
ilter
erage
tchen
tcher
a2008
ekeep
nfarm
dvote
renow
htech
fuels
flife
pitch
equip
-gate
nship
nfile
asure
icare
-pack
eedom
useum
tvote
discs
carts
excel
craze
teyes
ncent
amore
rains
anren
rmany
seyes
chost
entro
sgolf
chome
ytest
plive
elabs
ywise
lfood
ntube
tress
icafe
tflag
urban
dhill
ehell
ailer
fault
anted
nmind
apark
esson
kstar
oloan
ncash
dcase
ptech
lwork
anker
tlets
dunit
vnews
-spot
odate
crest
mhome
rband
tfact
eeker
stall
n-net
kdeal
edump
ypack
adata
a-net
astle
elate
werks
amigo
anjia
a-inc
brush
tmake
aving
oware
pmail
naweb
ttune
pjobs
esent
ofilm
ocode
yfast
-expo
hcity
kband
antis
lotus
saway
sitem
efolk
wired
kaway
lball
kshow
elend
neral
lfast
mance
ccorp
ndian
ttell
tcome
tknow
anage
nhigh
p2008
dubli
xtech
tdisc
dhits
thigh
rlady
esman
swiki
elost
owear
kwire
-work
-baby
kfish
kloan
esold
llive
nreal
isher
phics
sspot
tners
klock
ydoor
lpass
mping
heard
xcorp
stair
otest
nstep
ocorp
arent
lllaw
bnews
lfilm
rtext
kspot
nbyte
ummer
lface
orbit
tmall
xtile
ydown
elier
yfind
nover
howto
oduct
rfeed
tuser
strom
droll
dover
dzero
zhong
esinc
tclan
inity
atics
afree
sback
nhunt
ngnow
tthis
nflow
rblue
emile
epull
pless
ggers
tfoot
giant
rplay
ewood
swise
ilive
ptest
ndays
drums
ecold
lback
dmode
ensex
clive
ktime
organ
raway
eyear
kraft
ysign
ghana
salsa
kcard
eason
anair
ofood
dfeed
rtune
yacht
nfeed
kplus
clone
opera
llock
oplan
nette
yplan
tkids
yhere
arina
uclub
eeast
thits
ttask
nrise
ngtai
kfood
elcom
izzle
anart
rites
dsure
hance
donly
twish
istem
whore
mpage
slave
xlive
sbase
relay
sorry
ornet
capes
nwill
tmain
ylong
idian
adopt
tlaws
myers
aside
dcopy
ocell
sroad
ysafe
dways
ablue
erapy
sgold
ywell
sdown
loors
cblog
chase
otown
eshot
kfilm
neart
s-net
rcast
htime
avers
scars
hamps
mills
linda
tbets
ddown
erole
crets
saved
ublic
realm
dopen
atron
-intl
humor
ckman
dpipe
tbite
nring
kbase
tanet
yboat
oxing
obits
lhelp
lcore
reens
rpage
yfeed
yfeet
fully
heavy
sults
syria
igirl
tbars
epays
eluxe
booty
eloss
csite
zshop
jiang
hemes
outer
itong
evans
obase
ecars
erite
sizes
dwine
dwind
ngyou
flora
sfeed
ettes
dfact
yhigh
amour
tsold
kolik
ndies
anman
orium
dpart
rdone
ywine
chats
tboot
-help
blood
radar
evada
npool
orton
yvote
igame
kfast
mtalk
rwork
tleaf
-toys
erkey
efest
yzero
-best
ridal
emany
ktree
fixed
equal
ewife
ptime
diets
emory
lzone
tampa
kpass
andgo
nimal
efate
oenix
opics
ebuzz
lenet
aying
alsex
boobs
dlead
rowth
kfire
chive
flats
yword
lcell
known
slook
dpool
atson
ertop
demos
ecent
s2009
acash
nback
xblog
asart
rhead
ltest
isten
ounty
thack
krate
dthat
erdog
sdate
dload
lsign
bates
tlots
dwall
lshow
dtune
n-web
tkind
ercat
nword
athon
elaws
ordie
kball
lwell
otter
reone
dpick
itesi
rtest
scuba
tpays
cepts
lator
ngles
ocars
kfarm
erpro
erbuy
elnet
dhair
races
leone
mplus
antop
yhunt
nwood
tmass
nporn
llist
sdesk
stoys
crete
ytext
cures
shead
emple
narmy
-park
nbear
erone
nread
lness
yside
ptube
rwest
pplus
lient
omega
atrip
rwise
orida
aphix
aixin
liang
rench
limos
hnson
harge
evine
aoshi
vings
crypt
olden
aview
-tool
mcard
ndbox
dbyte
dbits
acare
edart
egree
edare
dwill
plove
neinc
tuner
laska
rsinc
iming
penny
moral
e-art
tdeep
dfair
lfund
oming
chest
ldown
lfeed
ntune
ducts
eplot
tjunk
dfive
pspot
thang
pshow
mhelp
lpack
nfive
lsale
olors
spass
ywind
mixes
hayes
least
blitz
ycase
raweb
sfind
tshot
ontor
scall
named
linea
ymore
lways
henry
llday
glist
rials
iesel
rting
hcard
react
rjobs
stogo
urces
ymall
eroot
meyer
ssets
spire
ysave
tpaid
tlogs
tpain
-food
-diet
entas
pfilm
ncing
jokes
quiet
shope
arose
mlive
dnice
ybase
orris
ibaba
shits
arise
npass
shade
enson
fiber
dfull
ichat
otnet
inion
cheer
rdeal
nfang
kjobs
erguy
tdone
sfans
epush
illon
anesi
kcafe
pbook
dbids
plife
kdown
pdata
dedge
dozen
tasia
obest
tpeak
tfeel
pgirl
afish
attic
dtype
afilm
yform
aller
ybars
ewman
igger
eflag
tneed
lanes
andle
nmeet
dmeet
itown
hlove
ebate
plink
atina
aplay
rfire
ehide
lband
dbill
kdoor
stack
tfall
spine
ctric
kedit
erich
caret
nghui
mazon
wares
-bike
ssing
jiaju
psale
pinoy
bonds
sorts
nenet
rcore
oyage
kpark
mento
erjob
t4you
ascan
4cash
okids
ypool
itten
n2008
edman
dside
npath
bbies
tinto
mlist
ogood
epics
dsnow
spack
ejoin
rbody
afari
dhigh
ruise
laway
rkids
dpoll
odies
tries
aclan
llnet
liver
efelt
pfish
booth
rando
hydro
tlabs
ffect
xinxi
ngnet
bases
nwalk
owall
ndnet
dbody
rcell
dfeet
niche
lride
ybrid
efuel
iphop
xmail
eruns
nzero
adian
esult
ether
otime
yporn
yrain
thell
ylook
qatar
hplus
pcard
yroll
yback
lwind
rcafe
diver
frost
iques
stoms
ytune
ksell
dsave
cclub
ckyou
ofarm
elion
llyou
izayn
osure
homas
lfarm
ygood
beans
slock
ktest
titem
kroad
ochat
kiosk
oview
sintl
bones
rices
labor
rpass
oninc
einto
wclub
inbox
trock
ychat
rsave
anvas
c-inc
xsoft
apost
awson
ssure
rside
leven
leyes
reece
anyou
crime
gcorp
urada
pcode
toweb
tolog
yeasy
vibes
reinc
leart
minor
stice
sfish
shore
idata
grams
crisp
rarea
wshop
eaker
orial
pfire
kgirl
dcafe
y4you
ships
pcity
ymode
elast
pants
hicks
clife
erlog
nwind
chinc
mline
stern
efear
kcell
dmove
pcorp
ybyte
logix
dlady
ttank
yonly
ndice
ldate
orlds
nscan
srock
nrock
ystop
alian
merit
bgame
erare
istry
ofind
costa
eused
sarim
dstar
awear
corts
tfrom
lcall
xtube
erose
ygolf
t-net
c2008
rmore
tsoon
santa
kplay
sider
kzone
anton
tsuit
dauto
comms
stare
nmart
cksex
osell
rzero
eator
harts
ckout
maven
icago
tcent
nnote
urist
erusa
yhits
bmail
ntrue
rtrip
grave
y2008
rbits
camps
lbest
ygift
erfly
diner
rella
khelp
taweb
twife
erred
erbid
noble
dlong
gzone
lores
sstop
dgain
ators
atory
ewant
taken
dreal
yfeel
rnote
fblog
knote
-pics
mycar
ffing
packs
-foto
icard
danet
lbase
ndyou
abest
tlast
gyuan
stout
rdata
lside
nwall
cteam
rsafe
ample
antra
o-net
tgolf
tsnet
rbill
ilink
lpage
onext
hsite
kdata
yport
assic
hmail
agger
yfull
thanh
ennel
lljob
lwine
ntasy
andme
ntana
ymark
ymart
mlove
yidea
atime
ember
nroll
whois
nheat
ahelp
gouwu
upiao
flags
often
whome
nsign
hting
ynext
ughes
encia
xlife
reats
ensen
llcat
tfail
oblue
akina
uying
tfill
dfoot
oetry
inski
kwise
aninc
aning
dwild
forus
dform
srule
arris
nnies
rwine
rfind
twant
tpile
lloan
ypoll
mloan
lbits
keasy
otcom
iform
icomm
inavi
ilong
fmail
mount
-rock
ntone
mspot
ngton
telco
erpay
ekids
ngear
engda
swarm
yfive
adeal
rcall
lanta
otary
iyang
hield
sanat
lstar
brick
dease
urfer
sreal
fries
lemon
peach
ifood
ilian
annon
nuser
tmost
dthis
cache
swift
kpage
isted
iches
gshow
paste
urger
gfood
mlink
dtank
yread
pview
aspot
ntake
efrom
chlaw
dwalk
ssons
llart
nthat
ybits
pcore
osity
ypick
itbox
nanny
chill
entum
kflow
rfast
tfeet
exist
yahoo
rmine
onart
mango
mcafe
smind
imart
tleft
yluck
ltree
reasy
ronto
trics
dblue
ngrow
milan
enlaw
nenow
glive
rmart
kless
drush
e4all
drift
ygold
nknow
delhi
rform
nwide
mbank
ubble
ickup
stext
twhat
-care
ecool
agate
dmine
pball
marts
veyou
otone
lrace
lname
pfeed
kcase
oreal
ilist
dboat
gbook
-body
tstay
angan
merce
ikaku
ermap
tizen
-view
biles
nasia
fares
aimai
rwell
sblue
qzone
prove
ncial
mpass
fixer
goose
eract
thong
dride
dyman
inker
aband
leweb
iding
rlong
ethod
dhour
eseat
klady
hlive
tsome
ublog
ao123
tenow
anote
dheat
ebike
fairy
evens
nmove
lroom
twood
stamp
ossip
while
erbet
ksafe
omore
ottle
dsnet
agift
-gift
dbite
elies
sfile
slide
arbon
ybill
yself
pazar
shill
stcar
ywife
nabox
hteam
twist
drace
nloop
egive
ndesk
whost
drisk
neway
pside
ywide
rejob
llnow
sfair
andit
tella
tells
chess
nhack
ksale
huong
tgear
riley
ngbox
iture
ditem
erule
lakes
lesex
dcost
erfan
utong
thank
dlike
atica
shout
sdoor
lyman
pmark
ingly
thalf
ednet
teast
lcash
yleaf
oball
dwife
yrace
icall
lsell
andan
aints
lingo
aches
alore
lring
-arts
zhost
itnet
osale
nesis
orent
mixer
frank
cabin
alton
nease
yarmy
dones
eguys
etool
ionet
odoor
hclub
tbugs
opost
agles
ylock
seart
unger
yable
etips
rwind
ddrop
ropen
tself
ditto
lscan
yfoot
egate
weird
emiss
eblow
yfirm
netic
dleaf
ospot
lpart
lword
owork
awood
nbill
ngart
kblue
orker
esays
ogold
mente
flick
mjobs
cgame
12345
tsets
amics
lnext
lwalk
kmall
deast
dporn
ofits
xinfo
tauto
celik
smode
penis
kname
uters
lbear
lwall
atter
shers
mmail
mdata
ivery
gcity
edbox
laims
relaw
yreal
opage
yhope
llbox
y-web
nsome
-wiki
wgame
anbar
birth
12580
ktext
kfeed
lsave
ggles
timer
reman
nlead
antic
yjunk
monds
spart
esses
ybuys
istic
llate
rfarm
aplan
cline
lnote
regon
taxis
dgear
llbet
wraps
slabs
4hire
nmode
reeze
awiki
ing4u
cmall
nacks
ydogs
eyuan
eclan
ecomm
nonly
ajiao
etbox
mband
eknik
esage
krace
sarea
tcuts
-safe
dsinc
mwell
anfan
aroom
tempo
david
irror
manor
kwell
ermen
roses
ibaby
tturn
ields
myway
ergia
rpath
e-sex
aporn
forte
eshit
upons
ohelp
cbank
olbox
ixels
roglu
ynine
elman
dcars
duser
reset
resex
detox
zclub
ftech
berts
eskin
mwork
ndisk
inked
anbul
retty
arity
comes
aphic
-gear
yring
starz
tonet
kpool
sfirm
yhand
isweb
uring
-cool
since
maple
npart
erset
acall
lfile
ought
kedge
slash
gbank
esday
emoon
abels
scool
timed
nrace
liste
lista
ssafe
llbuy
erbal
cells
enweb
ygear
c-net
s-llc
serch
tused
enart
astor
dsets
-yoga
rgold
lfair
vtech
grown
tthem
eyond
-post
ymoon
ingup
njump
-teen
pcare
pwork
ovely
klets
obots
achan
ynice
ntang
mpson
tserv
midia
xcity
lcast
nlift
rlock
otron
mtown
thour
dealz
pstop
ywill
dfirm
peasy
ycast
swiss
nstay
nfirm
motos
logue
tbaby
elles
ansex
scash
nfoot
acars
-ware
ksave
enses
isdom
llcar
nopen
neast
i-inc
oauto
mtime
lmind
tweet
wjobs
seeks
brake
entes
xnews
hloan
lsafe
nchat
mzone
peaks
screw
swill
yload
erpen
ldone
egift
nrate
gwang
nerds
dhope
rfile
yrate
tists
lgold
dhell
nfall
nchip
ablet
pblog
signz
istro
ivera
rporn
omedy
tkeep
lhall
anjob
esway
dwith
npics
stbuy
soles
ermix
icken
khead
avida
yment
lying
ankan
patel
lzero
opack
hinfo
bclub
rflow
lters
gster
usnet
ngain
droot
which
hrome
echan
phelp
irate
perks
etals
gspot
pzero
kanet
teman
maids
oward
nbaby
yorum
mdown
tmate
klein
-step
tvice
lchat
crowd
scell
erala
ingit
elton
badge
gtown
lrate
nquan
beton
rgood
nleaf
olabs
ndlaw
alman
pcase
smine
zblog
erent
xsite
ndare
dchip
kthis
askin
llboy
ntart
e-one
brite
ehuge
ndeep
ng123
unnel
silva
nhits
lhope
omaha
kwall
thave
pdesk
hecks
onair
ingon
aroma
sante
wsoft
kplan
reast
erget
codec
wlife
i-web
nhong
s-usa
punch
-band
ancar
dkeys
trike
ehber
dbear
tdump
fushi
lbaby
ewarm
inone
rivia
dlaws
stsex
uller
ssell
sthis
ymake
ances
kside
ptalk
rwalk
kcash
wnews
tgate
ripts
jogos
ohana
ewiki
eodds
eugly
ssign
rsign
amble
ewave
caffe
rones
esong
lbert
emaps
ybids
hrist
alism
arade
llpop
snack
trule
keyes
ralia
erbit
these
ofree
bhome
rclan
leach
mover
pfund
valid
aniac
repay
lroad
ngift
nlike
erall
asion
rizon
chweb
roids
lhill
blend
ehear
ndfit
yscan
ofish
lhere
ydesk
empty
icars
optik
llwin
egend
marco
llone
-wear
etill
hared
etlog
ryman
ereye
esyou
shake
atone
lfind
armor
itone
dhole
imals
qiche
abyte
igold
uhost
cknow
emess
erlab
adnet
chnet
mmers
wtime
dsome
kport
esfor
phunt
ecamp
isual
yhard
yfair
ansas
kgold
erair
dmade
lhack
rfect
pwire
rbest
kdate
plash
an365
llway
yunit
ydrop
-rose
kidea
orway
owest
ysuit
maybe
edear
dsuit
yarea
tract
otrip
odown
mgame
amway
knife
kscan
ntent
spics
hblog
rames
stool
rican
limax
atent
mfree
oform
ymove
tatus
senow
lheat
nflag
g-inc
anday
oband
rload
eylaw
elane
swish
reart
ridea
rword
kback
unews
parea
ansen
kmore
tmuch
ndout
yopen
linkz
llsex
oling
kship
rcost
bzone
turka
rknow
igner
herbs
ekill
alsat
abear
kbest
lebuy
casas
sbody
arweb
ksign
rmeet
stran
nkids
ediet
icles
tsinc
ypipe
ostop
wyork
tgrow
i2008
amoda
denet
karea
lship
scher
rhand
burns
ecord
ppost
iauto
tpush
ncast
uzzle
nfeel
snote
dcash
rados
bmall
dtape
array
khair
rlook
rooks
tpull
tmiss
eorge
ndrop
therm
nmake
ptown
kmind
datum
lfive
yfine
ticks
aguru
llson
yhack
eboys
stask
kform
icast
icash
nboat
aunch
2live
dedit
owska
arren
eneck
pdeal
a2010
tplot
yship
tpure
stics
omoto
ggies
-stop
ktrip
cloth
ykeys
toria
simon
nfans
rgain
ebind
owave
ehint
ndman
brian
adate
bills
rafix
pking
mking
ndred
junky
grain
smade
-blue
khole
ndall
etrix
othes
pumps
rinet
ijobs
owens
-code
rhope
erten
ercan
hland
gxing
ingtv
ebulk
lucas
crown
cular
ydisc
spath
ingin
stays
ddesk
rstar
-base
ymeet
otoys
ntape
mcode
esame
rdoor
itage
spect
amoon
antin
bored
efits
rcher
cland
lmode
pfree
isnet
ergas
skeys
nyang
rfans
hsale
tcool
ingme
riage
udget
ehate
psell
esbar
etees
lbody
illed
xhome
tobuy
orweb
urmet
kfund
ianyi
itout
aviva
kcorp
antos
plock
yeyes
nluck
larea
agear
-labo
-labs
kfile
lefit
rease
erout
agers
ocast
-call
rdlaw
nghua
althy
icode
inhas
allow
rilla
tents
spill
kpath
lenow
t2008
m-inc
erian
biker
rough
ifang
ttrap
tjoke
admit
nride
ytoys
lplay
dchat
opart
erjoy
pband
sease
expat
omark
mpost
sever
todds
sheat
aport
tyear
angas
kload
rater
sball
alala
-moto
ocore
gears
raham
atrol
edium
ndway
lmake
isson
r-inc
goals
sochi
tebox
ixing
llkey
ppies
fixes
ytask
reyou
dlift
kiest
lgood
ldrop
itoff
inery
dclan
sinos
dwood
ugame
fling
rthis
owboy
-plan
tster
4shop
llets
dkids
tsort
oride
nlogs
nrain
ounts
ncopy
iinfo
dwish
wsite
kmart
djunk
ntcar
sthat
rence
kroll
pidea
nhold
elder
opool
itian
eisen
randa
ersey
habbo
enger
sface
icart
fleet
felix
ciler
ogics
ppage
npain
acion
ekeen
mcare
4home
alues
mstar
nfeng
baron
lasik
ybuzz
olicy
ylike
eimei
rself
rsell
arbor
speru
rmade
acket
dsoon
eturk
asics
ttake
lthis
ikids
rrate
yneed
tfolk
izona
yplug
raphy
ebang
setup
hline
anhua
ulife
njoin
ramos
alart
nrest
tlies
asweb
shame
rroom
bells
lvyou
kcode
t-inc
a2009
esand
nfull
kview
nella
reway
esurf
llbar
nwave
nkers
op100
ihong
ntask
aobei
rturk
esall
ntank
ytrue
tlost
pfood
mones
lwise
redge
spick
lcase
orses
ksend
yfour
-wine
rmann
llook
yjump
mable
npeak
enton
ebury
chard
ittle
sopen
ngjia
lcars
dmall
stbar
dturn
ngolf
ltube
stips
smeet
lbyte
rdesk
rpool
kover
arone
ochan
twarm
sexxx
flair
xplus
erfit
olina
khunt
drawn
agora
htalk
blove
ktank
etnet
vlive
dever
lport
dekor
raven
doyle
ermag
andes
rhunt
dlots
ywish
nbars
lmine
okies
rcash
binet
tamps
wland
amboo
oname
mpics
nling
-wave
ptics
genda
ailed
inova
nmine
ianet
adrid
curry
itate
acool
ltrap
nfact
ybets
otree
rwill
khill
lamps
slead
rinks
stell
afoto
swind
rayer
tsaid
ankey
cheat
leigh
meout
inweb
oface
mekan
arium
tamil
tonce
nches
xgirl
cator
tarot
achts
ttrue
lites
pfind
rybox
lmade
tages
seman
tdraw
stash
ndraw
tilla
2play
ayout
acell
bster
wshow
lifts
ypure
tends
hplan
epoet
raine
arner
chiro
heets
swide
tpair
proom
razzi
rwood
dcuts
lelaw
dsize
surge
eatre
champ
tkill
humbs
athai
uebec
stery
eslip
pinto
aping
-guru
uties
nhell
arrow
onice
0plus
o2000
ygain
venet
esbox
yrock
lleye
dslip
lyers
ucity
-fish
ovino
paway
lular
nnett
remap
owine
chevy
rekey
npipe
dflag
r-net
coded
anshi
lwire
monde
pplan
pplay
ilers
tgift
erion
elina
psave
dmart
oroom
esake
essex
meals
pedit
ploan
ofast
mtube
rbars
oller
ivine
lflow
nroot
uscle
pwear
illes
ecams
ztech
rdays
arten
kcore
kwalk
ykill
imber
ckdog
snail
cigar
kring
eapps
rmate
azari
roove
pcall
erice
stdog
hions
sidea
hzone
hfish
osnet
gfree
enbox
npoll
omnet
sboat
rdrop
gplay
rfive
ypeak
anity
anite
lloyd
ggame
rpart
antan
ng365
anian
gfund
rofil
laris
kcast
manet
pbank
magen
-good
-core
dbugs
llfix
ndjoy
ndbuy
flock
angry
yedge
ingen
ohang
osweb
lewin
ptops
ehalf
ermax
e-inc
antes
uyers
ikers
rmony
ofuel
plady
wlink
forma
heweb
ngcar
tment
estax
amina
aauto
ndpay
nsong
rwiki
dluck
tmart
uises
fraud
llpay
cousa
lhell
olong
ghong
ndnow
aball
hurry
argas
thman
nedit
ytrap
herry
ndair
kiran
lcopy
winfo
wfarm
mmune
amark
-wood
leway
ndwin
rhigh
t-web
ndset
kvote
lords
otice
neill
nites
salad
stlaw
ntjob
nfine
nswer
ytype
pflow
xlist
ltune
ndbar
dogan
-hack
pdown
ecart
npure
lsoon
cipes
rural
dpain
hop24
uenet
vsoft
resto
ellow
ellos
-face
oscan
wcard
plies
asale
etten
erhit
wroom
1club
ilano
avior
nrush
etart
usite
ifree
fclub
yware
pship
rveys
yloop
iters
souls
lynch
rgone
shave
veman
kayak
tlegs
leboy
eduty
dshot
tonia
ngwei
vogue
alook
aface
keith
-hair
rgear
eston
nther
smash
oarts
ocall
lopen
stbox
profi
dmain
olite
sstar
-serv
inage
chris
ythis
illis
asher
pfast
kword
tcold
ywalk
krock
pread
urney
stnet
inson
-cast
ounce
igars
scase
naire
atnet
erwin
mbest
ejust
atura
dguru
ylift
masti
llins
areal
nxing
beria
rtype
owser
sical
idays
lporn
andel
ofoto
ycopy
iddle
dseen
harms
incar
strat
redog
rhall
tterm
dhall
sunit
ewhen
araya
nchan
leout
etcom
evida
aform
eadds
meyes
tadds
lpath
tbulk
ignal
asexy
nista
perez
kease
rnice
ndfly
ndkey
lldog
swine
srate
given
bfree
gwork
pfile
cklaw
tania
libya
laces
sheji
nberg
lless
ihelp
haosf
wteam
-mark
curve
rbets
ndjob
dbuys
yflow
anbox
elody
npick
ofire
mould
festa
islaw
rylaw
anavi
erkit
nions
hwear
ptree
lants
tjoin
nslaw
mming
ledit
yways
sages
anyan
etman
acasa
ebdev
tfear
plead
eeven
inman
stjob
tyler
kichi
rheat
dsman
favor
klook
shima
ovice
lchem
lster
romeo
ulove
uared
adoor
pchat
nfest
bians
gline
afans
erlot
canet
albuy
ckoff
99999
remen
hells
midea
ouple
ospel
ybugs
umall
ycome
nwash
asnet
hspot
wfilm
rscan
pform
ijian
wdata
logia
lemap
esbuy
wfair
srace
ytell
yfans
refit
telog
sarmy
a-art
ylost
rebus
ycool
teats
lrain
ntnet
icing
mfish
yuser
nwhat
ndles
itbar
dplug
kcall
rches
reten
yrise
dself
sdiet
dness
odeal
vette
teart
estas
rages
hhome
ndart
usone
-west
cjobs
ocess
tsent
ohead
2host
eslow
eslog
llair
ougou
isinc
groom
ssnow
dbars
stmap
adinc
query
palin
ernew
drest
seout
erbus
snaps
amper
naway
shequ
-moon
htour
eupon
lblue
onger
pples
emean
nserv
umbai
rhair
p2009
amade
ycars
ehave
erger
lchip
retop
ancan
visas
-lady
teven
a4you
temps
lobby
mmies
nturk
gking
rdogs
asons
lmann
kopen
ktune
cracy
0shop
anxin
yrush
ckart
ydark
oders
sarah
bilgi
ticle
repop
llfit
ggirl
venta
shear
props
ratec
ofone
styou
stead
-easy
dfolk
inton
gable
adise
stinc
measy
czone
wbits
gfind
adone
nebox
lidea
apass
anana
oknow
ogear
eeats
letop
hgirl
ardin
nlabs
lelog
epoem
efans
rdisc
llits
wline
ttles
ehero
nvote
recat
ndbid
anges
abang
amine
euros
usinc
idder
starr
kfour
nancy
zsoft
ersky
hfree
iewer
2earn
phead
lepop
ielle
narts
rpick
hfile
mplan
mplay
epper
daisy
ysinc
ndcar
aynet
kluck
ckbox
esten
dfear
mhere
nging
nelle
shinc
otect
gband
tcube
wplus
oport
tecom
meier
nyapi
ksnow
ndboy
orock
gbase
aniel
sfine
chant
nitem
inair
stweb
enovo
ttape
xporn
acked
lgain
oloji
ainer
ainet
edway
dpeak
tecar
thumb
yprus
aopin
lkids
chall
oover
eando
cando
arnes
estin
sways
dfest
erbay
norte
aslaw
orart
aynow
aidea
rotik
nviet
eeman
endor
hlink
ybags
third
yhole
itron
esrus
uzhou
iline
brock
ypart
marin
rgolf
emium
ycure
quinn
ezoom
amous
staxi
grows
awire
rnine
astik
ypath
utfit
rtone
onday
daili
-edge
rotec
repro
nclan
umail
rfoot
uhome
ndsex
ranny
vinyl
tjust
oclip
rhire
pbest
licht
filed
nthem
oarte
llfly
kdesk
dison
pwest
mixed
uyuan
aroad
ocure
lthat
hshow
casts
gclan
adent
ylead
rhits
-find
rries
yflag
dpush
inwin
ongda
rmacy
fshop
wires
leasy
rtage
meinv
iband
monet
nsize
nings
wtalk
rbids
cubed
ngmen
andas
btube
zhuan
pthat
mtree
enavi
adair
ronly
hface
orist
ozluk
chief
lhigh
dhard
mfast
sejob
ogist
nbids
erfix
dpoem
oning
enate
ilbox
ityou
reday
afone
tejob
workz
dunet
ocate
terry
iones
kitap
gtree
jacks
syoga
omind
kpick
orion
enten
dtrue
snext
inway
-back
lmeet
agnet
earte
ounet
renda
vatar
d4you
moose
pdisk
stend
armer
scure
llmix
olsen
chuan
adora
texpo
erler
mfood
ional
larmy
swers
yfact
onite
m2008
ymine
dties
anway
msale
orrow
matec
sguru
llady
arius
teone
ylane
icool
slice
sread
nnext
necat
hgame
lrush
ogift
8shop
gpack
ndmen
enear
odays
aflow
-chem
lpick
vebox
logan
ailor
abber
rette
dthem
ndits
nwith
ystep
dkind
tedog
added
adder
isart
oplex
onman
ntman
eamer
epast
lbars
steve
gjobs
daweb
llman
rwash
dwiki
ckits
npush
afund
rince
kthat
rreal
aison
rison
arsex
entia
entin
entit
ddraw
rware
kgood
ntair
nghai
dwhat
onian
afest
iwear
andar
tdark
inish
arway
emust
towns
nboot
orkut
pulsa
esume
lique
ushin
allah
ertea
ertec
ndher
lshot
icore
ywash
erand
rroll
cupid
mamas
idges
-rich
milfs
uston
-made
obyte
mpack
rebuy
ngout
lform
-fans
minar
psign
pover
kumar
desex
licon
frogs
gyang
gdown
wmark
dwash
ordon
joker
trole
quito
quite
coweb
yrisk
ntion
cobra
ndark
psend
illow
onate
apage
erior
licia
elinc
lemen
uncan
eslot
shman
esser
mopen
arlaw
kfull
sover
ghter
ldoor
llbit
wtest
thorn
reson
stman
utler
ntian
drule
oters
o-pro
rfirm
sebox
dhack
esaid
nsurf
osmos
antoo
epoor
khigh
ntoys
c2009
oveis
weden
alers
rcopy
dnine
inate
kedin
kboat
doubt
okeys
rtnow
hcode
llfan
kwind
kwine
yboot
alcar
ocasa
ipost
ysoon
ng168
drill
rders
ecook
ownet
aryan
steen
owman
mball
4love
ewart
craps
rwish
icnet
butts
vesex
xteam
almix
ghelp
ylaws
mydog
sbill
pfarm
otype
bblog
dmile
neman
brass
grate
ilton
pcell
teros
lwest
xtoys
walsh
grees
mring
merch
emuch
reall
eboom
wbest
iando
ycats
y2000
widea
ojazz
kdrop
antec
sring
ebird
angcn
ikini
nwish
gauto
ueens
rstop
ppeal
ppool
wolfe
alina
rlabs
rvote
fnews
lmain
alyst
recar
dtrap
fires
rebel
anmod
urage
lhard
anice
anica
amerz
rback
sporn
estan
estat
pluck
oodoo
avian
klike
ovsky
worry
medix
ltype
lboat
viral
omate
ntype
ndage
ndago
inets
dmass
cores
enbuy
d-web
lware
alend
ltext
uchen
popen
kwest
dango
lflat
awest
idaho
wpage
psafe
khits
plore
ebcam
bland
ryart
deart
larts
orbox
stape
lebar
ndfix
sdeli
enway
lyart
ysend
optic
ddisc
nbite
amara
lepay
rsion
tereo
ksure
lhunt
unday
enwin
mcars
dfine
yones
depth
sless
lasia
rluck
abell
alnow
cosme
laweb
atong
d2008
nokia
gorod
alift
utbol
aines
singh
insky
2cash
dodge
tecat
llbug
rebox
itman
onson
ygals
itcar
kbill
huayu
refly
wspot
erbag
nesex
r-web
edias
nfill
ycuts
ecame
ckboy
lypay
oscar
slady
asala
pjump
rsity
-form
manda
mando
icate
andis
hawks
orean
owhat
probe
iaweb
ng888
coats
ethen
gcode
asign
livre
rewin
njing
afire
utour
aroff
oomer
rmode
phack
encan
arman
lkeys
zlife
yease
otune
edies
orgia
ewere
rroad
cknet
tdead
dodds
ogers
macon
ckbar
innow
rturn
pface
ltell
rters
ltape
mroom
sbaby
ddays
dated
gpost
vorce
onova
sgrup
urizm
pstar
kchip
awind
wfast
nhung
ritic
nbuys
2sell
ldays
kbyte
nddog
ramed
anguo
andor
anlog
ysure
shard
spoon
onote
drake
bilya
mfilm
yanet
nhide
s-art
salud
-haus
ovate
rmass
aintl
alize
centr
oheat
elart
nthai
rling
stype
oment
scast
uites
yfrom
pipes
aloil
dpile
maweb
fetch
e2you
smove
djoin
yteen
yitem
llsee
an123
nzhan
geman
nshan
ghill
tsweb
rrace
wball
ayart
multi
ydeep
-well
inboy
oiler
deman
rving
atjob
kmeet
gcard
lfoot
zzang
saber
wbank
rians
rfund
3shop
lvote
tshit
aspas
aners
ohunt
opets
hbest
kfive
rfing
svote
hcorp
ydone
aoyan
amama
rfair
nfour
aogou
nsend
stuck
aluna
tpoet
trent
michi
rpain
edoes
uzone
ointe
ortex
rtain
operu
ecube
oflex
netix
tarts
rlead
lhand
fsoft
getit
ufang
emote
kbuys
tamin
sasia
rsure
lmove
kroot
rrest
alair
untan
erpop
llmen
kmine
ipack
tmile
-arch
demir
tling
epets
ebags
lfour
ycent
edyou
pmode
loops
fclan
erkid
wlive
icked
nainc
rrain
ansky
ychic
ychip
tflix
lpoll
kfoot
ssimo
yturk
itars
uporn
runch
yknow
mtest
retro
hroom
artes
mflow
rnold
risis
oflow
sties
lefly
noted
acine
cauto
2save
hardy
ostel
lfirm
ntpay
mccoy
stjoy
ckage
tlend
quake
dfall
tewin
clove
eclue
fting
ystay
sebar
ndrew
ianos
panty
a-usa
glock
antry
hking
ofeed
ecinc
usage
rasia
bcorp
gling
erium
tinas
hadow
slike
avery
ilnet
temap
avies
nseen
pmeet
ancom
-date
onico
ribes
atdog
yshot
tcars
lknow
quilt
llard
llare
cdata
erank
nterm
nturn
nwild
lwiki
argue
ingco
reguy
0game
olady
rtoon
shuma
pwell
klift
aride
top10
stmen
adart
thlog
kstep
tneck
ybird
rente
aples
eboss
areus
dbaby
dgrow
-this
eguru
onbox
s-ltd
elain
celaw
rlift
itest
iring
ndled
shigh
recon
almag
ooler
lstom
dflat
rhole
ctour
gebar
arche
nbugs
kyour
nonce
decks
injob
pmine
nmass
sflow
nmate
arded
idong
atree
rperu
idnet
froad
bcity
allaw
agree
lreal
ayton
itive
fucks
anbao
prior
etsex
ncure
olley
etrax
mfest
lejoy
atang
clink
otake
angou
areye
seboy
ytank
alaya
kwill
ppark
rchat
ysome
scamp
drunk
c2000
bject
tslot
tslow
reddy
ogogo
inetv
mware
erstv
isage
idade
rjunk
tally
dboys
ergen
nejob
-zero
itoys
rkeys
tebug
thate
atart
wedge
sthot
ymain
holds
esire
snake
otage
mpegs
gplan
khand
owned
co-op
kfine
rills
hfire
erial
rouge
hcall
swith
2home
hjobs
lboys
ifans
ayspa
atbar
spaid
opass
-bali
ulous
23456
yover
onion
onner
ronik
ncost
lnine
rmove
twait
4good
sfera
adown
otext
nbets
hwork
messy
mcall
indie
yever
ateur
tebid
ycost
leair
akery
oyoga
racom
gfilm
ngpro
twhen
-farm
lloff
ucher
tsand
dtask
steak
obear
intop
tival
ndwar
wmind
llguy
vsite
cenow
anual
dnavy
antex
weiss
hilly
anten
tsrus
rgrow
tsbuy
ungle
osave
ygrow
ceway
tduty
obody
rynow
erhub
softs
lefan
canon
ehang
sfour
ledog
oraid
orain
rdisk
wgood
olate
mcore
exinc
ethin
gfish
ysnow
thony
esout
sheep
arita
ersia
anics
tcamp
ndisc
icole
asafe
sible
fifty
staid
kbids
afarm
haway
iplex
erbig
omove
medit
oturk
tclue
dazhe
irman
artis
wheat
whead
aasia
-rent
hstar
salem
tcrew
arper
elian
icone
acers
nchen
e2010
stkey
heros
cutie
cooks
fairs
arion
rkiye
ttour
lemix
bowen
sesex
unwen
mmark
rande
gtong
ersee
elice
rsend
mshow
rynet
cocks
leset
cruit
a2000
ypaid
yjoke
neweb
lhair
kbear
flove
gecko
cesex
uddha
tinet
lcome
anmix
itics
anies
kmode
-page
lazer
chung
funky
sneed
amate
etera
d-net
aigon
ngang
sgate
nskin
oyota
eiren
inhui
igolf
llbus
anpen
azzle
ulate
epeti
ebell
heese
eputt
scomm
lhole
peter
lunit
neset
mchat
tynet
stnow
lbids
asurf
ensys
andra
okulu
olift
ssnet
ndinc
izone
sbear
hoken
aydin
rfeet
reboy
ellon
anmen
anmei
aloca
agine
agina
mform
mwiki
wpark
lofts
stwin
debit
andie
gitim
stusa
stled
tworx
ashot
arjob
htube
ofans
stbit
chelp
mfire
leaid
belly
carte
sgift
conet
dknow
erwar
grafx
kgear
swood
-mode
ixian
pword
-mate
arboy
arbox
wcity
oidea
homme
wlove
nkind
carro
oguru
pture
tgive
orado
owout
ohair
wblog
ivity
troke
gname
olita
ayboy
ncool
pgear
iaoyu
mfind
ysize
thear
uming
buggy
umper
burda
orsex
nlots
those
kazan
saude
andom
iplus
obill
ceone
twiki
owiki
ppass
gpark
bbank
onway
ymate
ohere
reeye
gemen
couch
nlake
cubes
fense
kdisc
ultur
emale
etnow
ylate
ucson
-cams
rmost
rdies
thart
eroil
tbird
s-pro
ipiao
dbets
mwall
onlin
stlog
insex
omama
azing
oggie
l-net
riday
emand
ertax
yboss
dpoet
altop
ysong
llcan
-ring
escom
rfact
dtour
usual
inday
raves
fcorp
ptype
ehung
ngzhi
imono
glory
inary
gtime
gcare
teweb
mtrip
epros
veris
venus
atman
ndong
ritas
t-art
asser
teout
pouch
wband
tsame
tdogs
seyou
reweb
huset
rgeon
rhill
choir
-vids
oyeur
yties
darmy
atoys
opros
ologo
mamma
inbar
inpay
fluid
zinfo
rslaw
ymile
hythm
evids
hobia
ebusy
eteye
eneye
gfest
rfull
0club
ydney
gfire
ticos
loger
tpoem
pauto
ryboy
albox
ebrew
jelly
ifilm
tubes
anara
tesex
sheds
mview
alway
nmain
esmap
aoyou
wagon
torch
anwar
entan
flare
odles
wtech
gives
tyweb
ngage
-idea
malta
sform
arlow
heasy
olock
pnote
urphy
erous
smake
indle
iroom
hmark
satis
ercom
rbyte
hurst
stany
tacts
nbell
aname
ushan
tugly
ewait
wsafe
llect
seall
ipark
awful
rakes
ddump
stbug
shnow
idate
eroes
blanc
csinc
sware
e2000
lcost
ndnew
ldlaw
reise
itjob
tsbox
asers
rsing
mpark
mypet
wking
-area
imoto
shire
itart
rtape
hlist
canal
thlaw
stfor
utors
eynet
altax
lawns
anfly
adeco
-thai
n4you
iliao
abody
ickey
rosys
imama
khack
inyou
lecar
cally
osted
finfo
ntnow
ngfei
pbase
cclan
leyou
ybell
tance
nkeep
ushow
semen
ither
uropa
l-art
betes
ctalk
lroll
fones
sscan
lturk
entis
ozero
rlies
anusa
anoil
malls
ichen
d-inc
nghao
anbit
rsuit
llice
loser
mines
ljunk
shand
etall
m4you
gizmo
otask
kinds
sroot
rines
lence
atbox
ktrue
nlian
aynes
rdeep
chten
mdate
avern
mirez
recht
amaya
hores
ormat
lfans
vhome
boshi
wales
asell
wnote
ytour
rmill
ngpin
leoff
eflex
runit
gauge
aibao
apack
l4you
ustry
lered
eclip
ypics
mster
oozer
omode
arsiv
ldeep
rmain
anget
issan
rismo
rloop
agram
ubmit
esusa
albet
niran
ycoon
flood
ltank
wwire
asave
khere
llcut
bbles
amoto
rstay
aplex
yards
leson
scopy
choff
rkers
yheat
orite
ogene
latte
hauto
muser
rmake
yfall
ngate
ndlet
velop
ocamp
ormen
ndmix
draft
urier
ongas
rhold
bauer
ymass
alled
teyou
stpay
gfarm
uland
archy
tpros
neats
tears
ouche
ndiet
rogen
-surf
acomp
pmind
rseen
atall
ntend
olver
polls
xgame
genic
lself
ltour
intex
seone
nnine
ckair
t2010
wless
elena
mandi
tslip
yroot
lshit
anban
hunts
pulls
iping
atana
otaku
lejob
alady
pmore
odisk
djump
osaka
ewolf
athis
srain
mtour
eteen
celet
1shop
guest
oboss
m-pro
emama
awork
enemy
lpipe
tscar
tguru
ieces
lltax
robin
oserv
pdrop
drock
aloha
ecomp
heory
eshoe
irage
ndtop
meart
omero
uwait
pknow
adium
dstay
essay
awave
ithit
ygone
tnear
ntern
ldesk
blife
tahoe
alfan
ymost
ckeye
gbear
cwork
rkill
rpets
could
ptank
rtell
stees
etbar
linds
uckit
neone
rlike
nbike
ngrid
llace
edlaw
ddark
teoff
dneck
ysnet
asong
ongyu
ongyi
ywith
-ball
ulive
avage
lrisk
-lock
racia
ckmen
kbars
tbags
osque
banco
unior
kpart
anxia
orino
uncil
dpair
regas
porns
rners
ergie
yrest
stoff
gwear
obike
kdraw
veone
agons
rebar
migos
wloan
ssend
anpan
nston
lltop
comix
ababe
anboy
hanna
draws
ayusa
-rank
netwo
lwood
nseil
kwith
atcat
ifarm
vshop
wname
leage
rchip
boxer
xdata
essor
chday
yarts
ckmap
larms
dinet
sfive
atyou
pware
rific
dawgs
ketch
pblue
alani
alane
vegan
atips
rgery
mgood
sefit
arger
apics
thnet
p-inc
pporn
uinfo
flink
artop
cairo
llhot
erack
thone
dloss
esoul
blind
elers
pfull
teady
ngdao
inkey
pwall
alfix
alfit
mainc
omeet
ndoil
reels
bursa
gdate
kzero
g2008
alves
amani
y-net
rcure
teran
letin
hfund
deats
earls
eddie
gless
o4you
inute
usana
mlabs
ocool
stark
repos
lsweb
otaxi
monte
hable
resat
rdraw
riest
dgive
yemen
-mama
reira
pgain
dlegs
narte
trare
selog
uting
enday
nhole
oties
nrisk
rebid
rebit
aires
ininc
sride
natic
ipped
juego
kware
elone
rdens
iplan
ddisk
ecial
ypain
gthis
tebar
zteam
malia
etpro
omass
cribe
farma
yhear
mgirl
umble
coltd
olics
meras
uzica
acart
ryday
xtalk
oogie
ibiza
phair
oodie
mblue
zione
wbook
achip
osong
abuzz
crate
htest
joins
i-art
knine
gkong
airan
alkan
tupon
rgasm
indus
tgone
rists
-five
phill
phile
tiest
oader
sjunk
inney
egoes
aloon
beatz
haser
shunt
nmoon
rsnow
zcorp
smack
drise
llask
fixit
rboat
dtell
tsout
tsone
isors
rball
dmake
astop
otape
gress
wgirl
aypal
scost
rindo
itred
o-inc
dging
radyo
ckcar
anlin
specs
asoul
tsman
cware
debts
petro
vinfo
edice
uffin
lexus
ckset
ecake
edweb
stbid
bello
ndbet
vendo
aurus
reoil
ebore
j2008
ingus
obets
ktape
sktop
anets
esing
phong
nyour
spets
dlets
mporn
ineer
edead
lyric
akhan
anmap
mwest
elimo
zmall
tfate
selaw
oused
rpure
flist
wsign
ofund
rsons
among
owlaw
srisk
teinc
jitsu
sboot
iloan
yhour
macro
nanda
ellit
ansit
awine
atnew
admap
ddone
ssjob
diana
ywant
hpack
nkill
yhell
xcite
phits
szero
yrose
h-art
evisa
ervip
yflat
lbill
sbets
sharm
shara
rcube
radas
lsinc
arias
ganet
lysis
bulls
er-it
ywild
mahal
ritos
ncers
meinc
kfact
sgone
fiend
uline
eming
sahin
debar
dyour
infan
zland
geria
pback
menet
ydiet
entra
onton
iporn
iport
mfair
ysold
ekiss
cater
hpage
thday
orise
astan
clima
climb
honor
emule
ylogs
nekey
arnia
rasol
ssart
rlogs
aturk
anoff
marsh
itang
owhow
ginal
ryear
emann
18114
apool
ahill
mpath
aybuy
sfoot
fzone
lysex
kunst
seday
-rush
gallo
nclip
eshut
rtoys
inbid
""".split()
# Rank the 5-character suffixes: entries earlier in the list are more
# popular, so they receive linearly higher scores.
for index, name in enumerate(POPULAR_SUFFIXES[5]):
    SUFFIX_SCORES[name] = (8000 - index) / 9600.0
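# Illustrative sketch (not part of the original data tables): one way to use
# the SUFFIX_SCORES table populated above is to score a candidate name by its
# trailing characters. `_example_suffix_score` is a hypothetical helper named
# here only for illustration.
def _example_suffix_score(name, length=5):
    """Return the popularity score of the name's trailing suffix, 0.0 if unranked."""
    return SUFFIX_SCORES.get(name[-length:], 0.0)
# e.g. _example_suffix_score("dealcatch") > _example_suffix_score("dealqxzzq"),
# since "catch" appears in the 5-character table while "qxzzq" does not.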
POPULAR_SUFFIXES[6] = """
online
design
travel
studio
global
search
mobile
sports
center
direct
market
-group
school
gaming
realty
system
vision
esigns
family
health
events
energy
portal
etwork
office
server
osting
photos
racing
source
movies
domain
review
forums
beauty
videos
sgroup
agency
invest
-china
master
europe
agroup
planet
nation
credit
ystems
finder
-media
casino
xpress
ervice
motors
hotels
tudios
church
report
-world
images
roject
-music
models
canada
rading
ashion
sworld
people
egroup
soccer
ngroup
apital
doctor
trader
lawyer
street
ompany
stoday
change
tgroup
expert
hunter
omedia
entral
emedia
living
brasil
motion
inance
nglish
splace
eworld
amedia
dating
france
ogroup
supply
-forum
island
garden
ebooks
italia
ecords
dental
foryou
igital
estore
united
onnect
igroup
flower
lgroup
-store
coffee
emusic
tation
wealth
-style
iphone
lounge
sstore
amusic
clinic
merica
upport
stream
ollege
itness
allery
google
-japan
estyle
survey
omains
sindia
smedia
smusic
mgroup
rgroup
centre
dreams
gamers
nmusic
nmedia
stores
ewelry
broker
cademy
cgroup
onsult
friend
outlet
africa
riends
profit
tmedia
ehouse
mexico
player
corner
london
anking
social
circle
phones
estate
houses
fusion
groups
shirts
utions
bridge
tworks
career
-games
tstore
imedia
sgifts
edical
repair
heaven
oworld
league
taobao
-hotel
ending
dgroup
marine
turkey
rmedia
aworld
edding
access
lution
achine
action
wheels
driver
rental
stocks
etoday
entals
omusic
sforum
-trade
elecom
tennis
etrade
tuning
cinema
cation
astore
resort
lights
ostore
square
engine
insaat
script
garage
ebsite
rvices
pharma
asters
ftware
sphere
sounds
egames
yspace
yworld
tspace
xtreme
sohbet
actory
-india
hawaii
ymedia
evideo
secure
nworld
pgroup
valley
aining
tmusic
upload
reklam
ations
bgroup
future
series
select
hannel
advice
ntoday
matrix
eviews
export
orrent
oncept
angels
awards
eguide
things
nstyle
racker
iworld
shouse
opping
emoney
nhouse
lovers
income
dmedia
gazine
ipedia
eforum
choice
siness
tworld
comics
spirit
hgroup
eather
nstore
nchina
monkey
empire
assage
boards
campus
crafts
safety
eplace
-video
inting
ticket
espace
-radio
-power
alumni
logger
urance
tgames
points
-house
trends
ngames
sguide
assist
istore
yhouse
ervers
tforum
eradio
ealtor
orange
awyers
leader
omania
achina
palace
orever
uccess
tguide
spoker
keting
ttoday
uction
prints
secret
gmedia
camera
retail
echina
nature
enture
inside
lowers
offers
jewels
ogames
tology
states
custom
summit
knight
ickets
ystore
tpower
clicks
ahotel
cindia
pmedia
unding
cmedia
styles
dmusic
pmusic
-photo
ansoft
galaxy
egreen
ytoday
reader
addict
curity
illion
ournal
seller
ietnam
dallas
nspace
tindia
dstore
rescue
skills
-homes
schina
escort
luxury
tsales
lyrics
-space
tmoney
dhouse
movers
spring
talent
bazaar
arning
tvideo
dspace
dworld
cycles
kmedia
silver
tplace
ygroup
tstyle
raffic
ephoto
tchina
epoker
anager
esting
stones
ishing
mindia
rtgage
tgreen
-guide
taylor
active
cheats
eboard
imusic
alerts
update
imited
switch
ladies
ooking
ichina
-guild
guitar
uilder
artner
iamond
factor
weekly
thotel
nsport
-poker
athome
trucks
tlight
othing
arabia
insure
simple
gworld
shoppe
indows
lworld
ahomes
tdeals
nvideo
dealer
mmedia
hacker
guides
kgroup
anshop
oofing
legacy
sgames
chools
strong
israel
dtoday
dragon
lmusic
ymusic
hopper
tstuff
opolis
ershop
-girls
ngreen
illage
ociety
brazil
tattoo
aphoto
rspace
hockey
servis
condos
elight
erland
-loans
latino
icious
string
aforum
tpoker
cforum
ternet
rmusic
ravels
walker
ohouse
igames
mputer
tronic
istory
anclub
tcards
-movie
lmedia
ystyle
forest
ospace
nights
tudent
ilisim
yhotel
heater
agames
ontrol
strade
baobao
rworld
artist
dvideo
russia
ytimes
import
nshare
digest
relief
easing
tastic
rogram
astyle
nguide
arkets
tpoint
create
tlinks
-tools
tradio
tprint
-sport
junkie
ackers
equity
tstone
elinks
ntrade
impact
xmedia
reland
amania
mentor
laptop
ephone
quotes
denver
celebs
lindia
makeup
rhouse
ehomes
ecycle
rating
ground
ercent
brands
shotel
dpower
thouse
edrive
atalog
tpages
fgroup
boston
aindia
hentai
letter
outure
eserve
reless
eworks
irtual
buster
olding
nradio
ysales
ronics
ndream
yvideo
ebhost
oforum
towing
ountry
esales
marble
liance
nguyen
spaces
ewatch
ydeals
ilders
tphone
mworld
pparel
musica
avenue
ibrary
ecards
ulture
asport
onitor
ctions
martin
ygames
smiles
eville
dguide
payday
attack
svideo
atrade
areers
panama
spider
techno
estuff
dforum
juegos
-stone
mstore
llshop
-mania
atours
e-shop
promos
course
window
dvisor
eports
nworks
sbooks
recipe
-green
eality
shouji
tbooks
avideo
arshop
gtoday
bility
cworld
raders
smoney
ewhite
ertech
writer
agents
moving
oliday
kmusic
lgames
stable
rabbit
edeals
forall
helper
erblog
wright
dlight
a-shop
tnames
scards
dstyle
ggroup
dhotel
estock
erteam
babies
adgets
xperts
nstone
ostyle
editor
ochina
leshop
roduct
emagic
hmedia
basket
estone
eindia
sphoto
dlinks
aphics
backup
orsale
epress
-links
opedia
gstore
ohotel
nology
person
eacher
ohomes
omatic
awfirm
emovie
aspace
wizard
rganic
genius
arking
timage
cience
museum
ptions
emodel
tmagic
floors
torage
-money
fetish
ahouse
public
ission
rocket
ehotel
ollars
oclick
escape
lradio
sradio
herapy
extile
berlin
hoenix
rtners
killer
rgames
unlock
rosoft
rchive
thomes
operty
epages
landia
ntures
nboard
screen
elogic
rstyle
cancer
shomes
porter
sprint
-audio
etrain
nsales
dollar
carpet
tronix
flight
uality
etimes
enders
champs
atural
dsales
reedom
castle
wilson
garcia
ventos
etours
dplace
bridal
dpress
lstore
tparty
enotes
eating
rtoday
arcade
tbrand
ssales
anhost
gforum
findia
tprice
ogreen
maging
winner
bucket
closet
police
tphoto
dshare
hinese
growth
ection
nstock
esense
nforum
ourism
tevent
ystuff
echeck
tnotes
oradio
miller
runner
tclean
isting
ygirls
tmatch
s-shop
ottery
ension
themes
charge
esport
aemlak
tdaily
ohnson
cities
tfirst
atoday
ration
tracks
filter
dmoney
gadget
pworld
option
jordan
ymoney
nhomes
gratis
tguard
coupon
stours
ltrade
emetal
tscene
esults
edlife
dworks
memory
rstore
afilms
sstyle
nstuff
sistem
erclub
ingnow
fmedia
ndmore
osport
nhotel
ecrets
oguide
ttrade
cradio
yradio
dstone
ntimes
nbooks
inshop
dizayn
eparty
ywoman
temail
-sales
nphoto
diesel
coding
pstore
iindia
colors
tmovie
tfield
result
otrade
wgroup
estory
e4life
agreen
oworks
shares
nlogic
-porno
ibooks
-phone
onster
ateway
utdoor
nindia
tsense
turner
lorida
nwater
annews
cmusic
npower
ovideo
tshare
assets
twatch
tsites
rforum
elegal
wnload
i-shop
epower
equest
-books
banner
eforce
epoint
ecover
tserve
nparty
poster
aphone
thomas
npress
orlife
degree
mspace
gspace
cruise
nsight
mmusic
opoker
austin
ncepts
edream
oducts
inlove
yphone
ddeals
atcher
iotech
nprint
xworld
llsoft
sitesi
itchen
isplay
ywatch
tlogic
scapes
dprint
ingers
ousing
etting
unting
ymagic
eguard
freaks
etrack
safari
escore
anlong
ldings
spower
tshops
atings
ectric
hughes
esites
aradio
sfirst
ypress
yheart
hybrid
deluxe
ething
ontact
iverse
remail
nmetal
carter
tering
rotech
raphix
ources
dcards
tright
yprint
asarim
nplace
ercity
estart
alaska
esshop
efiles
papers
apress
elines
ispace
erline
ophoto
ystone
tcrazy
effect
seeker
ustoms
o-shop
worlds
tdream
-point
ndeals
nbrand
alance
enshop
xgames
ntrust
tvalue
dstock
intech
netics
kworld
entech
nevada
eative
icaret
lchina
ttable
client
meters
cebook
keeper
lassic
covers
parker
battle
-watch
itrade
ehappy
ewomen
tmetal
eeting
yshare
eshine
erzone
estudy
shield
erbank
erhost
canvas
oprint
icture
tlocal
cstore
yblock
yguide
eloans
region
mhouse
ngyuan
yphoto
-works
eshops
ersoft
tdrive
nelson
eclean
method
surfer
ydream
ertown
etrust
munity
ermail
weight
-model
l-tech
atches
number
twater
esolar
ratech
itclub
hicago
reward
target
epaper
ngland
dtrain
tebook
mports
temple
ihomes
trades
wishes
eshows
talarm
boxing
dnames
lpoker
pstyle
-dream
otball
ltoday
oporno
easure
tboard
ywhite
apower
npoint
gmusic
rsales
rinter
ewater
lvideo
burger
lspace
ewards
ebuild
uworld
yparty
iforum
isions
caster
tsound
hoster
ygreen
eprint
ipping
rokers
wisdom
sworks
tthink
hammer
asales
lforum
lovely
ihotel
echeap
scorts
lljobs
county
ematch
nmoney
eparts
-clean
olight
astone
ssport
owatch
mirror
pspace
ymatch
trails
tloans
efilms
nvoice
nbrown
lastic
ywater
slinks
kforum
octors
rparts
eblogs
pixels
mylife
atwork
dgreen
-angel
xgroup
strain
ntable
apoint
ghouse
kennel
llgame
dotcom
skorea
lement
abooks
morgan
rpower
yguard
tindex
inline
ypoker
weaver
dforce
ntrack
kontor
tfresh
s-tech
prices
ermany
poland
taiwan
ymovie
tables
teknik
dradio
nblack
-solar
dmagic
dchina
single
eshare
rshare
makers
nyoung
ybooks
meshop
etools
efresh
rofile
twoman
poetry
rmoney
tories
torder
uforum
animal
hiphop
device
otopia
ystock
aching
morris
nserve
nloads
rguide
vgroup
eprice
ydrive
ivideo
isport
christ
iajiao
xmusic
osters
aworks
ebuddy
e-tech
pguide
ndshop
attery
wanted
trooms
tviews
oparts
abella
bworld
onight
voyage
oplist
ojects
chrome
voices
tagain
opower
tdoors
patent
anders
talive
ypages
hmusic
dphoto
tforce
antasy
dmatch
erotic
locker
tspeed
nomics
wmedia
-board
xvideo
yworks
tfiles
ernews
trates
george
-shoes
nstate
eevent
ydaily
anlove
tcodes
dwater
tsigns
cripts
eglass
mvideo
ugroup
acking
pbooks
tshack
kerala
around
erinfo
tapple
ophone
nriver
efacts
sdeals
edaily
minder
ptoday
tpress
lander
reight
yfiles
llinfo
-print
tvoice
ontech
yclean
cookie
stlist
tpaper
rehber
nphone
ancial
pretty
dboard
oflife
-korea
innews
tstudy
mradio
ncards
others
raphic
tstate
telier
nmagic
rasoft
ersite
tstart
roshop
visual
-click
yhomes
reshop
tsport
eshoes
bstore
rtrade
spital
erhome
s-clan
-place
ewyork
ghting
ttimes
ington
galore
lothes
ylight
teshop
esound
eblack
nnotes
kspace
elocal
lplace
stsite
ypoint
nevent
tready
tworth
rstock
gossip
chance
otspot
oplast
psites
mation
mmerce
claims
hforum
xports
-files
better
dphone
sspace
oupons
nesoft
yfirst
tclear
onnews
conomy
aguide
anhome
zforum
avings
llness
opages
checks
dimage
assion
yachts
tablet
etshop
osales
winter
plates
egoods
etouch
rplace
yglass
banker
tbrown
rvideo
scount
graphy
botics
layers
nfield
mtoday
dprice
eagles
riders
oregon
values
makina
efight
omatch
vmedia
-cards
ttrack
wmusic
dlogic
karate
dwords
oclean
dfacts
vestor
herbal
grants
nsites
ethost
harris
antech
thappy
stools
dideas
swatch
inabox
cology
ourmet
myhome
tshirt
espeak
lumber
resale
seball
arties
andone
ytruck
legend
nblock
ancity
npages
apedia
nimals
dframe
istyle
sgreen
tlands
-share
rology
hearts
oolbox
nimage
llnews
spizza
ygoods
eapple
tosoft
tscore
ecodes
redits
leclub
lledge
ealthy
outube
pickup
yclass
odrive
rradio
otline
lldata
lhouse
tville
nparts
r-tech
mature
llking
devent
iaprop
telife
triver
erfect
doodle
tesoft
snacks
dwatch
blocks
onblog
kaixin
eclass
strace
record
vation
flicks
jersey
tspots
kstore
ndaily
member
llplus
ybrown
efunds
nbuild
eright
baobei
nglass
amovie
llover
hitech
ontime
-water
edhost
tglass
strike
anties
ingyou
apoker
llteam
sville
eguild
textra
ntouch
bright
tlines
maniac
rstars
titude
ortune
tstand
ssence
aparty
llmark
cigars
rhomes
bsites
efirst
angers
greens
reload
dgames
rhotel
stamps
dboxes
buyers
carbon
anjobs
obooks
ethere
natech
ystudy
llstop
nghong
stnews
kworks
unique
remind
itrust
obiles
stline
asting
sheets
tforms
omovie
udents
dbooks
prayer
incode
tparts
aiwang
phouse
htoday
pforum
tquote
-image
llbook
ypower
sfiles
refund
puzzle
omance
replay
itters
rphone
dmovie
villas
ysites
ystate
nsshop
incity
sthost
zmusic
etcity
howard
ingxin
gvideo
ctoday
sideas
status
obbies
tsshop
rmetal
madrid
npoker
erlife
tchair
nglife
nsound
dpoker
logics
888888
dbrand
llable
tricks
-light
zworld
llwear
witter
pradio
rlight
iphoto
tester
pocket
answer
eijing
manlaw
anwang
inders
shop24
powers
etones
return
erlive
beyond
tboxes
drinks
tanbul
eation
melody
gphone
tstaff
ansite
budget
lasses
nnames
aodian
bmusic
msales
yboard
abroad
ychina
frames
ocards
sindex
aholic
nguard
rsport
tstock
ddress
aylist
tseven
cguide
nwatch
reator
comedy
onshop
amazon
lmoney
mchina
ecipes
flyers
khouse
tnight
trivia
centro
stshop
dchild
anblog
stclub
ndrive
hamber
efield
nville
stbank
charts
policy
nshell
neshop
n-tech
isheng
powell
ttrick
insite
muscle
tsmile
asinos
ytrack
eready
nagain
ynight
signer
-squad
detail
ingles
stbest
worker
rdeals
eaders
tblack
nfilms
cyprus
llmore
smania
tcheap
mshare
nfresh
ixiang
ytrust
eading
2music
awater
notary
ngsite
llclub
rphoto
eideas
atimes
theweb
ogging
ospeed
arland
exshop
yfield
-anime
daniel
matech
oboard
eindex
ustore
esmith
isales
llland
nclean
dclass
dflash
rindia
lstone
thumbs
oshops
istone
eflash
ewoman
lltrip
evalue
-smart
llspot
unters
stmail
stdata
cooper
widget
tleads
onclub
taware
webdev
nwomen
-tokyo
mology
nvalue
ltools
ealing
shared
french
aptops
lltime
ogirls
lltown
ustice
oscope
rimage
little
gradio
nmodel
holder
erbook
dtruth
greece
rideas
dvoice
enames
lldeal
ranews
dmetal
llfree
recall
rehome
itours
bottle
summer
oshare
rotica
struck
ddream
anfree
imagen
elshop
omoney
otours
shions
llball
nkorea
llface
treach
-smile
illing
ndhost
ygifts
escope
-pages
ertalk
dotnet
quebec
inhome
sender
llhead
phuong
horses
archer
gchina
nlegal
ystick
ealive
chicks
reform
tsnews
amatic
dparts
xhouse
remlak
rgifts
onsite
launch
-stars
reline
erspot
emarks
nergia
xpower
ingart
atable
o-tech
rotect
orning
ancorp
erwear
gospel
tbuddy
gister
ndroid
armony
lender
tkorea
gupiao
ckmail
reisen
rwomen
tclass
-trust
olinks
sdrive
parade
dguard
ndyman
inclub
groove
kansas
oracle
ethink
dsites
harbor
stplan
bmedia
ercard
reclub
enclub
stlife
quared
llwork
iguide
esmile
sozluk
mpower
dstate
andate
nswers
hunder
edates
llfive
rebook
nsugar
nflash
rother
tfacts
tgirls
zanesi
otoday
viewer
eseven
milano
-warez
ingbox
atures
nemail
rstuff
ruises
llmail
yvoice
burada
vforum
ayclub
smovie
toshop
gemail
kradio
twrite
ngwang
ewords
nmovie
lookup
lltalk
dstart
emania
llsite
robots
rowser
tehome
splash
ynames
incorp
equick
dparty
ithere
senews
ytouch
egifts
zmedia
ourist
dapple
tplans
dpages
llcard
torial
lemail
-shops
etruth
tsouth
alking
ologic
basics
mworks
ncrazy
cooter
rsound
dscape
resume
nemlak
erpost
lister
rocess
oimage
whouse
mtrade
mother
a-club
webcam
twords
eclock
esolve
labels
folder
tflash
ensite
lllink
twhite
nspeed
yblack
pirate
ecoder
stress
rprint
dfiles
sparty
etlife
tfight
etclub
-blogs
ofiles
tdrink
rparty
cherry
grafix
yflash
yprice
llshow
t-shop
hsband
omagic
itfast
queens
etball
sticks
tmodel
tesite
adance
oodies
twheel
finity
kmoney
ynotes
yright
pgames
rlogic
escene
gindia
stings
ailing
rindex
rmagic
terman
etrace
yfresh
bubble
gmoney
nlines
speaks
pdrive
dsheet
inform
bphoto
oppers
-parts
chines
marina
enmark
mehome
logist
elover
itsoft
enspot
ccount
etaway
enlove
tcourt
spoint
wonder
ofilms
elease
etnews
stinfo
plants
etable
nlocal
ypeace
dnight
metals
nprice
retech
nashop
esignz
intime
yevent
amodel
ackson
stpark
renews
llfish
mental
tlegal
buying
itnews
bishop
oneill
dnotes
scents
dvalue
ttrust
ynergy
tblock
oindia
rsshop
stpost
tlists
ylists
twomen
tearth
llpack
arcode
llfire
tprime
dclean
yspeed
orless
deshop
ieshop
ingman
ngxing
refill
-party
stroom
andles
lstuff
cpower
spress
enblog
trance
odcast
ldeals
oating
tshell
stlive
famous
appeal
rstand
s-info
erloan
ngoods
adlife
lprint
idshop
dpoint
ession
llpass
averde
turizm
couple
bamboo
imania
amonds
irways
yframe
dfilms
mkorea
nlinks
t-tech
atclub
lesson
rtours
erpark
enesis
tmarks
sphone
eorder
e-club
futbol
graham
ometer
ojapan
esclub
s4kids
arhost
ofresh
ininfo
button
optics
ishare
ntalks
indian
mpoker
edtech
ozilla
egreat
yimage
ontent
nshows
dthink
agenda
rkshop
rpress
newman
tsmart
rboard
ngshop
epedia
ylinks
edshop
nthing
inners
pvideo
ocracy
latina
stereo
llcell
burner
-times
chuang
nalive
ayshop
ihouse
mumbai
stfind
ylogic
owater
etrics
awatch
-dance
aninfo
nurses
hworld
nhorse
ofirst
stcard
nmatch
stview
tfunds
echinc
ycross
slight
otnews
resell
bchina
dearth
intown
daware
eriver
gamerz
nrates
wstyle
ashing
rlinks
crunch
golden
-spain
yindex
naware
essage
umusic
gamble
ndview
ppower
ecraft
ebreak
saigon
suites
evoice
llport
egrees
ostone
ycrazy
hostel
strate
pwatch
owtime
sepeti
nstick
listen
icheng
e-info
anbase
magnet
nsigns
stpage
heatre
ckorea
eporno
oholic
dwhite
debook
tlanta
a-tech
sguild
nsmith
ranger
dshows
erpack
sydney
telcom
anfood
atshop
ybrand
cksoft
arcare
sboard
ostock
nsider
tlabel
oberts
itjobs
ywords
puters
erates
matica
arnews
shadow
napple
dindex
adhost
elists
counts
tfound
rishop
yorder
dinner
angame
rgreen
notech
tsheet
nscene
seshop
ridges
llhost
sgirls
rhythm
ensoft
i-tech
atrans
stteam
ikorea
llsell
-flash
saints
tspain
lstyle
ooster
mpress
niture
ycycle
wbooks
ytalks
mhomes
llsign
mmoney
obrand
scheap
lguide
itshop
namics
adclub
eimage
llinks
profil
lehome
dsound
brooks
theart
edfish
cheese
llease
renter
ndmail
oblogs
alstom
dining
dheads
nghome
epatch
regood
eemlak
rgirls
anplay
tframe
kindia
etbook
llmind
anning
insoft
dtouch
ineart
itteam
saudio
llride
rspeed
antong
arnold
tutors
ystaff
orfree
ourney
aplace
everse
irline
tcross
oflash
onfire
armail
ostage
llhall
aunion
edance
teclub
bforum
stband
fisher
shells
ivorce
ytrade
dstuff
gphoto
aloans
llside
ysmall
eunion
eminar
tenews
foster
thorse
efront
ewaste
inmail
rofits
ctures
tideas
edaddy
stidea
inyuan
tfloor
emaker
ystart
tcases
llfilm
llfile
s-corp
toblog
sebook
ennett
perman
dsigns
tgraph
ncodes
lllove
onhome
mgames
etopia
ymetal
tscale
mgifts
avista
dcycle
aysoft
ysense
ebates
utopia
obilya
lesoft
tsharp
llband
aowang
smodel
ookies
ancard
dcheap
tbreak
rewear
ysound
arbear
bakery
onkeys
twhere
tcalls
ebytes
ayment
inator
rnames
dtrack
outfit
-stuff
ateens
tepage
r-shop
a-info
ipower
llbits
ebrand
allife
etruck
tsmall
remium
yapple
signal
toyota
ndlife
espark
amaria
ngyang
tzilla
ortech
inblog
tiques
kpoker
bspace
ythink
singer
urkiye
llroom
intage
llfeed
tstage
-tours
lehost
terior
bgifts
lmetal
akorea
dplant
ysmile
-staff
talone
eaning
amping
arclub
rbrand
tlevel
othere
aville
hchina
finger
tclock
ttotal
irsoft
semail
rchina
stfilm
yscore
equote
yshops
ndpost
dthing
erator
warren
kphoto
urfing
ergirl
nstudy
rforce
dblock
uitars
nsmile
xradio
ysigns
nworth
brides
dwoman
nelive
pshare
hengda
dminds
eocean
sweden
ntruck
dsport
iloans
tcover
llcall
strack
rsites
tsight
ysmart
aplast
erooms
atroom
gworks
dzilla
ntours
andbox
rtists
nclick
esblog
tideal
-films
dpaper
dgifts
ndoors
top100
stwise
bhomes
tplays
strust
-touch
ekorea
sstuff
npaper
lawson
eoffer
ything
evital
xplore
dmodel
rworks
urveys
murphy
-gifts
rpoint
sclass
nlight
alegal
nforce
flores
tabout
dtimes
xindia
inwork
s-club
stscan
tposts
stblue
rehost
etbank
armacy
itlist
chshop
ealers
lenews
dspeed
a-soft
nebook
antown
lhomes
yhappy
esorts
timate
tivity
loader
dlands
ndmark
tplant
ologia
rylove
smatch
ncycle
rewall
ornews
alogic
licker
rstart
tsblog
nsense
ndance
listed
ckbook
areyes
norton
lpower
ingcar
llcity
ylines
tsland
yideas
kchina
terror
ormail
usters
urgery
ncover
watson
estech
nscale
luntan
es4you
steady
futuro
ngsoft
ndtime
ngkong
neland
aylove
ebrain
angood
ennews
sttime
nstand
arcard
etsite
tcount
efaith
anwest
mstory
legion
itizen
oronto
desire
avatar
itopia
adsite
limage
atlife
ndlook
utures
ghomes
eslife
teplus
yforum
tucson
idgets
gnames
oncall
venice
-today
tchers
ycheap
llgirl
essons
inlong
ucking
owshop
talker
ehorse
erotik
ocrazy
lunwen
yoffer
ttests
radise
stdown
ndbank
savior
arwear
ectory
tnorth
stroke
mpages
stlink
rizona
ingold
anteam
hmoney
e-plus
gitech
lynews
erfilm
yshoes
stroll
cutter
llhere
theads
ellers
theory
aboard
denews
atplan
ecrazy
tneeds
ighter
llfast
apples
llaway
topics
dpeace
anhead
lelive
otable
-union
rmatch
refood
ttruck
atrust
salive
llhome
yplant
ayhome
ockers
ttruth
egitim
semlak
tsplus
ytruth
llback
nshops
amirez
tehere
dblack
rystal
owjobs
llfarm
yfront
ership
ertime
nlinea
o-news
duoduo
stloan
ostory
s-news
itmail
masoft
istech
llview
arplay
diving
divine
lledit
arents
yalive
sthunt
tchain
dextra
rition
veland
dsight
tnoise
pdates
llpost
tquick
ograph
ndback
xstore
eteens
yclear
llhelp
rlines
ryland
arlive
ctrade
strans
nwoman
stbuys
nfiles
tdates
llbase
ympics
tfilms
ngfood
ithost
anners
rrents
ontana
lindex
kpages
tskill
lldrop
metrix
-egypt
artoon
nclass
novate
dcover
ebella
mphoto
onjobs
dready
-trans
llnext
inlink
otsoft
arsite
iashop
t-host
ebgame
ajapan
feeder
sthelp
scribe
ngraph
visors
bistro
friday
lmagic
onhost
intalk
ttitle
antime
ghotel
iaclub
onsoft
yplace
vering
etalks
arfish
rtrain
rcourt
tstick
rstone
estars
alshop
llthat
wallet
dehost
mplace
tbasic
onseil
wstore
ejones
jgifts
reside
anline
f-clan
ogoods
nstart
ickers
hgames
btrade
nstage
gsales
llplan
ancing
eafood
cement
ollins
inette
ngclub
lllife
nfight
llcode
edepot
tproof
tribes
dright
dalarm
bounce
candle
rgains
ouncil
submit
aifeng
treats
planes
meimei
ddaily
onfree
stfree
esinfo
-salon
nquote
moveis
ywomen
ayblog
eplaza
ebanks
umbers
erhead
junior
ssmart
-press
stwear
espeed
remark
oglass
patrol
pieces
tunnel
philly
ebites
itcard
ametal
emails
sadmin
andata
estick
itamin
fields
eclick
nhands
iology
umedia
tshows
n-shop
llsafe
ayhost
ytrees
edgirl
pkorea
nsmall
ychild
linker
tender
pchina
nshine
rfilms
ndsoft
asites
llzero
rsmile
enting
ysport
eclips
ouston
rtrack
rsense
tfeeds
sunday
tlocks
stcity
stcare
rfront
stflow
rashop
pinion
ywheel
dscore
econds
oville
gpower
thands
nnight
ystory
ustech
e-mail
tabase
hookup
ybuild
rwatch
ycards
osites
mailer
haring
rtland
livery
eminds
rlando
indate
stwalk
erring
nlabel
dstand
stdoor
otrans
yellow
idwest
otimes
ntools
anhelp
tepost
onsale
l-shop
aycare
refree
yparts
llcare
ereyes
llbyte
dquote
dcodes
starea
ements
ndrops
eprime
csales
acards
-admin
ysshop
meland
antage
msmart
stpath
iradio
mazika
rights
dwards
nework
ptical
savers
haopin
l-auto
gordon
reamer
pstuff
lancer
ylegal
orally
esland
tgoods
anfund
tloads
nindex
ologie
-first
oxygen
atlive
ewants
seplus
gslist
andyou
psales
pworks
boiler
ncheap
atsoft
ndable
llcase
ercorp
anhair
inhost
tavern
yshell
harter
dhuman
season
ongxin
efocus
tchart
pointe
inshow
kgames
lllist
alight
nanews
larson
rpages
aforex
lllock
amtech
plight
prince
otions
e-city
-craft
roller
arport
ngblog
llcore
nwords
pology
llpath
gtrade
ebucks
esplan
omundo
dtrust
aysale
anpark
oplace
stface
tissue
adream
dition
roland
lpoint
adshop
snight
mclean
ompare
dscene
hikaku
ehobby
onland
astand
uncity
manual
stdate
isense
rwhite
-voice
angone
sketch
meclub
chotel
medium
brains
anpost
edrink
inplay
etooth
taward
pazari
telink
etters
n-info
tstalk
escale
ckband
etpark
llpool
dfloor
iracle
minute
clouds
t4life
bikers
nchain
llsale
yabout
ayaway
ershow
ayfood
yscene
oodles
e-auto
crisis
atinfo
esleep
loblog
ollect
lywood
plocal
ynorth
dsmile
atfree
dcount
echain
ldwide
entity
soffer
owhere
tacafe
eleven
atbook
telist
itools
edtube
123456
llscan
yplans
atools
tbites
pcards
a-news
decard
gstock
llfood
saving
tscrap
phomes
estand
erdate
masala
mining
ingren
-trend
rforms
eshack
rguard
ingnet
-teens
anland
igeria
yhorse
norman
esells
lldown
dglass
gwatch
orkids
toffer
c-clan
blinds
erlist
medias
erform
retown
igorta
eagain
peline
-check
ndnews
notice
parent
oncorp
iberty
kultur
neinfo
arfilm
ecount
nspeak
rtools
isters
atback
ashare
ertube
ayplay
kecity
untech
stmove
breeze
cjapan
simage
ndates
dernet
ndsend
buddha
eorgia
cklife
ndwire
-drive
mideas
fmusic
tbuild
omtech
raclub
allday
chomes
atrace
leblog
stcell
llhill
motech
cables
inface
mphone
itches
remote
aranch
telite
eclear
jensen
stcode
erface
sprice
stpass
tronik
ndwise
sonlaw
gether
norama
maimai
yshome
stsale
sdepot
hradio
eheads
visage
tspeak
locity
mforum
dstaff
ktrade
owdata
dwomen
nthony
anbest
jacobs
affing
idnews
sarang
ssites
stluck
efarms
always
ealone
lllive
sorder
llbars
tsleep
pindia
erlove
ergear
tohome
dcrazy
teinfo
ebears
llring
sjapan
elling
elline
albums
ndlink
lmania
phobia
fights
ercode
neline
ncourt
ndfire
llbank
tangel
tyshop
inpark
stspot
barter
ebound
etjobs
nehome
armine
duncan
ckblog
eakers
stedge
stival
stlove
arocks
dstick
llrace
efense
ecruit
sevent
rscore
please
s4sale
recare
europa
stname
sfilms
rojekt
amoney
tspark
tecall
epairs
stopen
dshell
lishop
olland
demand
leland
adtown
barnes
swords
kaudio
dshops
dfresh
eaudio
tlearn
dayspa
tmaker
stmode
yboxes
sthome
epeace
ypaper
lldate
coders
anblue
thuman
mewire
erules
tslist
stlead
nginfo
admark
tlarge
iworks
azilim
cotton
nasoft
actors
llpart
tvnews
liners
hanson
orblog
echair
oflove
ybytes
erwire
ychair
etmail
tdigit
llmake
eround
anhill
anball
wcards
snotes
rvital
stjobs
ngtong
asblog
jchina
ardens
enight
ommune
ilters
jgroup
urious
aterra
manage
combat
atnews
llnote
llword
igreen
nright
critic
ststop
rebank
sglass
aimage
stsafe
stplus
enerji
remier
tianyi
-adult
yloans
nesite
rstate
arinfo
eabout
enline
llwell
tsclub
ddrive
ndhome
inmore
nscore
esktop
berger
lgreen
bilder
opress
renote
stride
oveyou
heroes
stmine
ddoors
odream
gsites
usical
sthits
aysite
anlist
ardlaw
arcity
anmail
tfolio
uysell
ticles
stlook
rpeace
cesoft
dlines
seguro
ssnews
ewrite
debate
sparks
sthere
rflash
agifts
edhome
ealbum
lshare
etsoft
elabel
tenter
stfair
llwire
tecard
hhomes
tegame
onpark
santos
ockets
retime
acific
asound
ractor
eearth
tsinfo
colour
otools
eheart
nearth
yshort
oncafe
tahost
icards
esalon
ealarm
pquest
isgood
epiece
eyoung
aytime
-plaza
kmovie
eplays
ralive
muzica
troute
arteam
inking
ripper
erporn
teries
mitech
places
artech
tclick
resoft
treads
ersity
riting
kstock
snmore
wspace
enfree
iteasy
tcatch
stless
trials
stwell
nelife
spedia
tpicks
inlife
xlogic
prings
withme
enbank
yserve
tgreat
eyshop
a-team
tthere
rehair
stbook
arbank
stboat
ntrain
adloan
iplace
anfang
dsense
overde
ogamer
ckline
anghai
stfast
atfire
ersale
tthing
argain
rmodel
yrules
leview
ckshop
astart
llplay
esmall
anroom
pstock
nguild
ycause
ostart
oemail
rewine
rewind
stball
yriver
signup
dustry
alling
mtrack
eblock
onking
abrand
llroll
nwhite
wpoker
nready
ndclub
dshape
mepage
ndloan
cuties
youhui
ysmith
resave
railer
ntrail
hosted
ghosts
kuwait
odance
ancers
atters
moment
xtrade
arline
dcalls
arlink
dvance
render
urgeon
nplans
dranch
ontest
litics
matter
atdeal
yfeeds
bidder
noffer
slogic
modern
pblogs
ellout
illies
ndcard
rogers
xfiles
rplant
hindia
voting
nedate
isalon
yville
seclub
llline
ejeans
pmatch
ennedy
dcourt
yquote
-proxy
voyeur
atspot
rorder
llfund
dhomes
atfarm
stmore
eneral
elimit
refile
rymail
-women
loaded
ctours
okmark
epicks
rguild
pflash
neking
rnotes
aylive
ttrash
rdcore
ngirls
hphoto
anlife
shuang
erfeed
stsoft
tranet
-forex
adshow
isinfo
stpipe
odeals
elclub
nomore
itlive
eroute
tcheck
rticle
ndfast
getech
lesale
tecity
egirls
rbooks
ocross
binder
yroute
osmart
trules
-glass
yclock
ndlist
intong
kgifts
ryjobs
joseph
espain
tigers
blades
rriage
stshow
atpage
kloans
cosmos
yscale
pilots
canner
-magic
stfarm
tahead
gpoker
rbuddy
regame
memail
stform
esugar
stsell
eplast
abanks
-earth
ndcall
gesoft
inland
nchair
llmode
palmer
stback
cebank
linics
adfree
mhotel
dmarks
aparts
tshoot
tgifts
sclean
making
o-info
s-life
lphone
sample
owfarm
ebabes
zgroup
teaway
nideas
dazzle
ricket
doffer
swater
stdeal
oparty
ontown
c-info
etdeal
ylands
kraine
ystage
olocal
dspeak
erband
ckpack
estorm
ereach
etreat
rpoker
htools
relive
cetech
stgain
reeman
anedge
eshift
gibson
elland
ataway
router
sheart
teland
holdem
rtrust
-night
eshome
pmovie
remore
anname
teline
ssmail
-order
cklock
jungle
sworth
infast
actice
scover
efolks
kcards
wprint
rapple
mesoft
apharm
tolife
ttouch
escent
ssions
nloans
yplane
etcard
idhome
stbase
wgames
pjapan
bpoker
tlucky
ytrain
sinema
nfloor
araway
oliver
inplus
rdrive
cphoto
former
rbreak
mwatch
ggreen
owcase
lsites
ashair
iwatch
itpark
arsoft
talife
llbill
elding
yfinds
handel
ntrace
ktoday
otolog
marker
ywhere
alsite
ffroad
xforum
smagic
nthego
sdream
ragons
terbox
ircuit
tevens
lanews
mentum
anshow
ascene
ention
alinda
stgood
tloose
owroom
erlady
cvideo
nsells
nahead
urismo
ustyle
stport
scores
anstop
rising
paylas
sthink
mylove
tycoon
ewalls
selive
iboard
selove
efever
manuel
icycle
ntnews
chaser
lltext
terate
p-shop
ocloud
stcore
tunion
niform
atland
tbytes
epills
staway
etarea
ycover
aymail
ymodel
tbrief
pdeals
lidays
ostate
bloger
iyorum
ckcard
dmoves
4money
reroom
orgasm
xdeals
ntotal
andwin
atcall
llheat
stella
medics
eframe
-paris
oggers
lboard
ecalls
istics
ansign
tsolar
sloans
recell
eltech
iginal
uhotel
stwork
ndface
tdepot
dyoung
stword
artree
rtimes
tetalk
yfunds
glinks
inwell
eforms
mecare
earing
enhome
smetal
p-tech
inbest
ydrink
holics
-force
inding
ttrain
xhotel
neblog
recity
allace
ehaven
stease
ndlive
tshoes
owners
telect
yvalue
anbook
oright
ichael
lphoto
banana
pecial
eplane
nergie
efloor
tsugar
-guard
3media
anjing
reways
etlink
anbank
aohang
edback
sitter
ylocal
ticity
ickson
etgirl
tcolor
bisnis
tetime
viagra
llhard
svegas
mantra
ntopia
ndless
osolar
recard
erries
lanner
inlist
steele
entown
odaily
tapart
laudio
nrules
netcom
oserve
anlady
ebrief
dfirst
ingdom
erfree
bikini
xhomes
ebrown
gstone
apages
letime
incare
incard
slegal
aynews
erfile
astech
necity
eology
stgame
elands
onmail
maison
nfirst
ethome
ymania
format
adview
derman
trendy
llblue
ourage
savage
ploans
ablues
object
yalbum
ensale
m-shop
ntrial
ercast
dstudy
ayland
ewhere
esouth
anreal
salary
sproxy
etcare
nproof
biznes
llpark
utique
vendor
rstick
visits
sdance
rstudy
mplant
stbids
golfer
ashnow
tshape
oquote
stitem
oevent
obuddy
-metal
talong
ceblog
itrace
oearth
veclub
tscope
arlist
ntlife
hspace
i-news
ercare
nplant
rblogs
essite
stlots
t-info
stlift
ndplus
holmes
dlegal
rysoft
figure
streal
atwalk
butler
itlove
clever
agirls
-sites
ailbox
otlist
svalue
irport
edcars
ipoker
tquest
aydeal
sattic
yshift
ckcity
yagain
ftrade
onlife
resite
rcycle
alland
llpage
nlodge
tfront
doshop
adjobs
ersion
erbody
llthis
ngshow
inbank
asolar
llbest
ndcode
earson
xposed
netube
tdaddy
stwall
uneral
sttalk
amonth
linked
wvideo
reinfo
korean
nksoft
tspage
yhello
erfood
s4less
andbag
ckable
-woman
y-tech
kesoft
rsigns
rdware
rdaily
leteam
rshall
stries
ajones
inbook
stmark
ealert
ngdong
abetes
owhite
emoves
ngtech
ckfree
tstars
eshirt
tefeed
entist
ancall
teight
ntruth
ndover
yhands
ndhere
ashley
easons
ndlock
weblog
ithome
stland
aojian
iggles
teller
llwind
lltape
iberia
alysis
dancer
rabber
ythere
injobs
lorist
ellnow
locate
ldaily
dprime
o-life
tracer
eplans
nedown
mlight
esteel
otwear
ryhome
uangou
dgirls
volume
addies
courts
ysight
lltell
nalbum
d4sale
ckfind
andeal
rfacts
-shine
stnote
ipsite
ingpro
""".split()
for index, name in enumerate(POPULAR_SUFFIXES[6]):
SUFFIX_SCORES[name] = (4301 - index) / 4301.0
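# Sanity note (illustrative, assuming the length-6 suffix list above holds
# exactly 4301 entries, as the constant implies): the first suffix scores
# 4301/4301 == 1.0 and the last scores 1/4301, so more popular suffixes
# always score strictly higher.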
def prefix_score(name):
    best_score = 0.0
    best_prefix = ''
    for length in range(2, min(6, len(name)) + 1):
        prefix = name[:length]
        # Default to 0.0 so an unknown prefix can never beat a real match
        # (comparing None against a float would fail on Python 3).
        score = PREFIX_SCORES.get(prefix, 0.0)
        if score > best_score:
            best_score = score
            best_prefix = prefix
    return best_score, best_prefix
def suffix_score(name):
    best_score = 0.0
    best_suffix = ''
    for length in range(2, min(6, len(name)) + 1):
        suffix = name[-length:]
        # Same defaulting as prefix_score: missing suffixes score 0.0.
        score = SUFFIX_SCORES.get(suffix, 0.0)
        if score > best_score:
            best_score = score
            best_suffix = suffix
    return best_score, best_suffix
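
# Illustrative helper (not part of the original module): combine the best
# prefix and suffix scores for one candidate name. It assumes PREFIX_SCORES
# and SUFFIX_SCORES have been populated as above; the example name is made up.
def _demo_affix_score(name='webmasters'):
    prefix_part, best_prefix = prefix_score(name)
    suffix_part, best_suffix = suffix_score(name)
    # The combined score rewards names that both start and end with a
    # popular affix; the actual values depend on the score tables.
    return prefix_part + suffix_part, best_prefix, best_suffix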
|
jcrocholl/nxdom
|
prefixes/popular.py
|
Python
|
mit
| 298,933
|
[
"ADF",
"ASE",
"Amber",
"BLAST",
"Brian",
"CASINO",
"CDK",
"Elk",
"Galaxy",
"Jaguar",
"MOE",
"MOOSE",
"ORCA",
"VisIt"
] |
84bda6c550ac53ff9edcef893314e1fb28230171ce05b2c3b5ecd763ededb30a
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
'''
Created on Sep 9, 2009
@author: tw55413
'''
import logging
import sys
logger = logging.getLogger('camelot.view.model_thread.signal_slot_model_thread')
from PyQt4 import QtCore
from camelot.core.utils import pyqt
from camelot.core.threading import synchronized
from camelot.view.model_thread import ( AbstractModelThread, object_thread,
setup_model )
from camelot.view.controls.exception import register_exception
#
# Wrap and unwrap None passed through signal/slot across threads to
# prevent segfaults with PySide
#
# https://bugreports.qt-project.org/browse/PYSIDE-17
#
if pyqt:
wrap_none = lambda x:x
unwrap_none = lambda x:x
else:
class Null( object ):
pass
null = Null()
def wrap_none( func ):
def new_func( *args ):
y = func( *args )
        if y is None:
return null
return y
return new_func
def unwrap_none( func ):
def new_func( x ):
if x == null:
x = None
return func( x )
return new_func
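# Quick illustration (not part of the original module): a None return value
# wrapped on the sending side round-trips back to None on the receiving side,
# in both the PyQt (identity) and PySide (sentinel) branches above.
#
#     fetch = wrap_none(lambda: None)
#     deliver = unwrap_none(lambda result: result)
#     assert deliver(fetch()) is None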
class Task(QtCore.QObject):
finished = QtCore.pyqtSignal(object)
exception = QtCore.pyqtSignal(object)
def __init__(self, request, name='', args=()):
QtCore.QObject.__init__(self)
self._request = request
self._name = name
self._args = args
def clear(self):
"""clear this tasks references to other objects"""
self._request = None
self._name = None
self._args = None
def execute(self):
logger.debug('executing %s' % (self._name))
try:
result = self._request( *self._args )
self.finished.emit( result )
#
# don't handle StopIteration as a normal exception, but return a new
# instance of StopIteration (in order to not keep alive a stack trace),
# and to signal to the caller that an iterator has ended
#
except StopIteration:
self.finished.emit( StopIteration() )
except Exception, e:
exc_info = register_exception(logger, 'exception caught in model thread while executing %s'%self._name, e)
self.exception.emit( exc_info )
# the stack might contain references to QT objects which could be kept alive this way
sys.exc_clear()
except:
logger.error( 'unhandled exception in model thread' )
exc_info = ( 'Unhandled exception',
sys.exc_info()[0],
None,
'Please contact the application developer', '')
# still emit the exception signal, to allow the gui to clean up things (such as closing dialogs)
self.exception.emit( exc_info )
sys.exc_clear()
class TaskHandler(QtCore.QObject):
"""A task handler is an object that handles tasks that appear in a queue,
when its handle_task method is called, it will sequentially handle all tasks
that are in the queue.
"""
task_handler_busy_signal = QtCore.pyqtSignal(bool)
def __init__(self, queue):
""":param queue: the queue from which to pop a task when handle_task
is called"""
QtCore.QObject.__init__(self)
self._mutex = QtCore.QMutex()
self._queue = queue
self._tasks_done = []
self._busy = False
logger.debug("TaskHandler created.")
def busy(self):
""":return True/False: indicating if this task handler is busy"""
return self._busy
@QtCore.pyqtSlot()
def handle_task(self):
"""Handle all tasks that are in the queue"""
self._busy = True
self.task_handler_busy_signal.emit( True )
task = self._queue.pop()
while task:
task.execute()
# we keep track of the tasks done to prevent them being garbage collected
# apparently when they are garbage collected, they are recycled, but their
# signal slot connections seem to survive this recycling.
# @todo: this should be investigated in more detail, since we are causing
# a deliberate memory leak here
#
# not keeping track of the tasks might result in corruption
#
# see : http://www.riverbankcomputing.com/pipermail/pyqt/2011-August/030452.html
#
task.clear()
self._tasks_done.append(task)
task = self._queue.pop()
self.task_handler_busy_signal.emit( False )
self._busy = False
class SignalSlotModelThread( AbstractModelThread ):
"""A model thread implementation that uses signals and slots
    to communicate between the model thread and the gui thread.
    There is no explicit model thread verification on these methods,
    since this model thread might not be THE model thread.
"""
task_available = QtCore.pyqtSignal()
def __init__( self, setup_thread = setup_model ):
"""
        @param setup_thread: function to be called at startup of the thread to
        initialize everything; by default this will set up the model. Set to
        None if nothing should be done.
"""
super(SignalSlotModelThread, self).__init__( setup_thread )
self._task_handler = None
self._mutex = QtCore.QMutex()
self._request_queue = []
self._connected = False
self._setup_busy = True
def run( self ):
self.logger.debug( 'model thread started' )
self._task_handler = TaskHandler(self)
self._task_handler.task_handler_busy_signal.connect(self._thread_busy, QtCore.Qt.QueuedConnection)
self._thread_busy(True)
try:
self._setup_thread()
except Exception, e:
exc_info = register_exception(logger, 'Exception when setting up the SignalSlotModelThread', e)
self.setup_exception_signal.emit( exc_info )
self._thread_busy(False)
self.logger.debug('thread setup finished')
# Some tasks might have been posted before the signals were connected to the task handler,
        # so force the handling of tasks once here
self._task_handler.handle_task()
self._setup_busy = False
self.exec_()
self.logger.debug('model thread stopped')
@QtCore.pyqtSlot( bool )
def _thread_busy(self, busy_state):
self.thread_busy_signal.emit( busy_state )
@synchronized
def post( self, request, response = None, exception = None, args = () ):
if not self._connected and self._task_handler:
# creating this connection in the model thread throws QT exceptions
self.task_available.connect( self._task_handler.handle_task, QtCore.Qt.QueuedConnection )
self._connected = True
# response should be a slot method of a QObject
if response:
name = '%s -> %s.%s'%(request.__name__, response.im_self.__class__.__name__, response.__name__)
else:
name = request.__name__
task = Task( wrap_none( request ), name = name, args = args )
# QObject::connect is a thread safe function
if response:
            assert response.im_self is not None
assert isinstance(response.im_self, QtCore.QObject)
# verify if the response has been defined as a slot
#assert hasattr(response, '__pyqtSignature__')
task.finished.connect( unwrap_none( response ),
QtCore.Qt.QueuedConnection )
if exception:
task.exception.connect( exception, QtCore.Qt.QueuedConnection )
# task.moveToThread(self)
# only put the task in the queue when it is completely set up
self._request_queue.append(task)
#print 'task created --->', id(task)
self.task_available.emit()
@synchronized
def stop( self ):
self.quit()
return True
@synchronized
def pop( self ):
"""Pop a task from the queue, return None if the queue is empty"""
if len(self._request_queue):
task = self._request_queue.pop(0)
return task
@synchronized
def busy( self ):
"""Return True or False indicating wether either the model or the
gui thread is doing something"""
while not self._task_handler:
import time
time.sleep(1)
app = QtCore.QCoreApplication.instance()
return app.hasPendingEvents() or len(self._request_queue) or self._task_handler.busy() or self._setup_busy
def wait_on_work(self):
"""Wait for all work to be finished, this function should only be used
to do unit testing and such, since it will block the calling thread until
all work is done"""
assert object_thread( self )
app = QtCore.QCoreApplication.instance()
while self.busy():
app.processEvents()
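# Illustrative usage sketch (not part of the original module). The names
# `expensive_query` and `widget.show_rows` are hypothetical; the response
# must be a slot method of a QObject living in the gui thread.
#
#     model_thread = SignalSlotModelThread()
#     model_thread.start()
#     model_thread.post(expensive_query, response=widget.show_rows)
#     # expensive_query runs in the model thread; its return value is
#     # delivered to widget.show_rows through a queued signal.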
|
jeroendierckx/Camelot
|
camelot/view/model_thread/signal_slot_model_thread.py
|
Python
|
gpl-2.0
| 10,093
|
[
"VisIt"
] |
beaf197ff57c1d3042b7f0dbf4921a388751f1688c6914c940edeaccd22cd80d
|
#!/usr/bin/python
import os, glob, shutil
import model_param as mc
def transfer(filename):
path, name = os.path.split(filename)
f = name.split('_')
print "Transferring... " + name
# netCDF file destination base folder
dst = mc.data_directory
if(f[1] == 'CORE'):
shutil.move(filename, '%s/core_entrain/' % (dst))
elif (f[1] == 'CLOUD'):
shutil.move(filename, '%s/condensed_entrain/' % (dst))
else:
shutil.move(filename, '%s/variables/' % (dst))
return
if __name__ == "__main__":
    # transfer() needs a filename argument; take it from the command line.
    import sys
    transfer(sys.argv[1])
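# Example invocation (illustrative; the filename is made up). The second
# underscore-separated field of the name selects the destination folder, and
# the folders under mc.data_directory are assumed to exist already:
#
#     python nc_transfer.py GATE_CORE_profile.nc   # -> <data_directory>/core_entrain/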
|
lorenghoh/ent_analysis
|
conversion/nc_transfer.py
|
Python
|
mit
| 602
|
[
"NetCDF"
] |
dbd5a6d4c90fa7dab8cdf0154148a0cbd641699f957420574e08dee7a8f5d5f6
|
# only pull once per stock
# urllib2 opens http urls (authentication, redirections, cookies, etc.)
import urllib2
# timer not to overload sites
import time
import os, sys
stockToPull = 'AAPL' # AAPL, FB, UAA
stockRange = '1y' # 1y, 10d
def pullData(stock):
try:
# one time creation of stock's data to be stored here
fileLine = stock + '.txt'
# possible overwrite of text file
os.remove(fileLine) if os.path.exists(fileLine) else None
urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/' + stock + '/chartdata;type=quote;range=' + stockRange + '/csv'
# visit, open, and split url
openedSite = urllib2.urlopen(urlToVisit).read()
splitSite = openedSite.split('\n')
        # grab valid lines after the values section
for eachLine in splitSite:
splitLine = eachLine.split(',')
if len(splitLine) == 6:
if 'values' not in eachLine:
# append, don't write to not clear file
saveFile = open(fileLine, 'a')
lineToWrite = eachLine + '\n'
saveFile.write(lineToWrite)
# remove last line
readFile = open(str(stock) + '.txt')
lines = readFile.readlines()
readFile.close()
w = open(str(stock) + '.txt', 'w')
w.writelines([i for i in lines[:-1]])
w.close()
print 'Pulled', stock
print 'sleeping'
time.sleep(2)
print 'done'
except Exception, e:
print 'error in main():', str(e)
pullData(stockToPull)
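# Illustrative note (not part of the original script): each kept line holds
# six comma-separated fields. The field order shown here is an assumption and
# is not verified against the (now defunct) chartapi service:
#
#     date, close_p, high_p, low_p, open_p, volume = eachLine.split(',')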
|
Darthone/bug-free-octo-parakeet
|
technical-analysis/2-get-stock-prices.py
|
Python
|
mit
| 1,368
|
[
"VisIt"
] |
6440e77eab91e78e0dae04bb93051bf64f3b9f4ddda92ac954fd9938abc6bf49
|
from ase import Atoms
from ase.structure import molecule
from ase.visualize import view
from gpaw import GPAW
from gpaw.wannier import Wannier
calc = GPAW(nbands=5)
atoms = molecule('CO')
atoms.center(vacuum=3.)
atoms.set_calculator(calc)
atoms.get_potential_energy()
# Initialize the Wannier class
w = Wannier(calc)
w.localize()
centers = w.get_centers()
view(atoms + Atoms(symbols='X5', positions=centers))
|
robwarm/gpaw-symm
|
doc/exercises/wannier/wannier-co.py
|
Python
|
gpl-3.0
| 411
|
[
"ASE",
"GPAW"
] |
db228a1df4e6fbc6b3e22fbb32fff01804c88449d0b8d975a3a95ae2cf3e98b8
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Classes for representing multi-dimensional data with metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from xml.dom.minidom import Document
import collections
import copy
import datetime
import operator
import warnings
import zlib
import biggus
import numpy as np
import numpy.ma as ma
from iris._deprecation import warn_deprecated
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.analysis._interpolate_private
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris._concatenate
import iris._constraints
import iris._merge
import iris.exceptions
import iris.util
from iris._cube_coord_common import CFVariableMixin
from functools import reduce
__all__ = ['Cube', 'CubeList', 'CubeMetadata']
class CubeMetadata(collections.namedtuple('CubeMetadata',
['standard_name',
'long_name',
'var_name',
'units',
'attributes',
'cell_methods'])):
"""
Represents the phenomenon metadata for a single :class:`Cube`.
"""
__slots__ = ()
def name(self, default='unknown'):
"""
Returns a human-readable name.
First it tries self.standard_name, then it tries the 'long_name'
attribute, then the 'var_name' attribute, before falling back to
the value of `default` (which itself defaults to 'unknown').
"""
return self.standard_name or self.long_name or self.var_name or default
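# Illustrative examples (not from the original source): name() falls back
# through standard_name, long_name and var_name before using `default`.
#
#     CubeMetadata(None, None, 'tas', 'K', {}, ()).name()      # -> 'tas'
#     CubeMetadata(None, None, None, 'K', {}, ()).name('n/a')  # -> 'n/a'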
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter(object):
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection(object):
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection([pair.merged(unique) for pair in
self.pairs])
class CubeList(list):
"""
All the functionality of a standard :class:`list` with added "Cube"
context.
"""
def __new__(cls, list_of_cubes=None):
"""Given a :class:`list` of cubes, return a CubeList instance."""
cube_list = list.__new__(cls, list_of_cubes)
# Check that all items in the incoming list are cubes. Note that this
# checking does not guarantee that a CubeList instance *always* has
# just cubes in its list as the append & __getitem__ methods have not
# been overridden.
if not all([isinstance(cube, Cube) for cube in cube_list]):
raise ValueError('All items in list_of_cubes must be Cube '
'instances.')
return cube_list
def __str__(self):
"""Runs short :meth:`Cube.summary` on every cube."""
result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
enumerate(self)]
if result:
result = '\n'.join(result)
else:
result = '< No cubes >'
return result
def __repr__(self):
"""Runs repr on every cube."""
return '[%s]' % ',\n'.join([repr(cube) for cube in self])
# TODO #370 Which operators need overloads?
def __add__(self, other):
return CubeList(list.__add__(self, other))
def __getitem__(self, keys):
"""x.__getitem__(y) <==> x[y]"""
result = super(CubeList, self).__getitem__(keys)
if isinstance(result, list):
result = CubeList(result)
return result
def __getslice__(self, start, stop):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
result = super(CubeList, self).__getslice__(start, stop)
result = CubeList(result)
return result
def xml(self, checksum=False, order=True, byteorder=True):
"""Return a string of the XML that this list of cubes represents."""
doc = Document()
cubes_xml_element = doc.createElement("cubes")
cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
for cube_obj in self:
cubes_xml_element.appendChild(
cube_obj._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder))
doc.appendChild(cubes_xml_element)
# return our newly created XML string
return doc.toprettyxml(indent=" ")
def extract(self, constraints, strict=False):
"""
Filter each of the cubes which can be filtered by the given
constraints.
This method iterates over each constraint given, and subsets each of
the cubes in this CubeList where possible. Thus, a CubeList of length
**n** when filtered with **m** constraints can generate a maximum of
**m * n** cubes.
Keywords:
* strict - boolean
If strict is True, then there must be exactly one cube which is
filtered per constraint.
"""
return self._extract_and_merge(self, constraints, strict,
merge_unique=None)
@staticmethod
def _extract_and_merge(cubes, constraints, strict, merge_unique=False):
# * merge_unique - if None: no merging, if false: non unique merging,
# else unique merging (see merge)
constraints = iris._constraints.list_of_constraints(constraints)
# group the resultant cubes by constraints in a dictionary
constraint_groups = dict([(constraint, CubeList()) for constraint in
constraints])
for cube in cubes:
for constraint, cube_list in six.iteritems(constraint_groups):
sub_cube = constraint.extract(cube)
if sub_cube is not None:
cube_list.append(sub_cube)
if merge_unique is not None:
for constraint, cubelist in six.iteritems(constraint_groups):
constraint_groups[constraint] = cubelist.merge(merge_unique)
result = CubeList()
for constraint in constraints:
constraint_cubes = constraint_groups[constraint]
if strict and len(constraint_cubes) != 1:
msg = 'Got %s cubes for constraint %r, ' \
'expecting 1.' % (len(constraint_cubes), constraint)
raise iris.exceptions.ConstraintMismatchError(msg)
result.extend(constraint_cubes)
if strict and len(constraints) == 1:
result = result[0]
return result
def extract_strict(self, constraints):
"""
Calls :meth:`CubeList.extract` with the strict keyword set to True.
"""
return self.extract(constraints, strict=True)
def extract_overlapping(self, coord_names):
"""
Returns a :class:`CubeList` of cubes extracted over regions
where the coordinates overlap, for the coordinates
in coord_names.
Args:
* coord_names:
A string or list of strings of the names of the coordinates
over which to perform the extraction.
"""
if isinstance(coord_names, six.string_types):
coord_names = [coord_names]
def make_overlap_fn(coord_name):
def overlap_fn(cell):
return all(cell in cube.coord(coord_name).cells()
for cube in self)
return overlap_fn
coord_values = {coord_name: make_overlap_fn(coord_name)
for coord_name in coord_names}
return self.extract(iris.Constraint(coord_values=coord_values))
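    # Illustrative usage (not from the original source); `cubes` is a
    # hypothetical CubeList whose members share a 'time' coordinate:
    #
    #     common = cubes.extract_overlapping('time')
    #     # each cube in `common` is restricted to the time cells that
    #     # appear in every member of `cubes`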
def merge_cube(self):
"""
Return the merged contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to merge the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.MergeError` will be raised
describing the reason for the failure.
For example:
>>> cube_1 = iris.cube.Cube([1, 2])
>>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
>>> cube_2 = iris.cube.Cube([3, 4])
>>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
>>> cube_2.add_dim_coord(
... iris.coords.DimCoord([0, 1], long_name='z'), 0)
>>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
Traceback (most recent call last):
...
iris.exceptions.MergeError: failed to merge into a single cube.
Coordinates in cube.dim_coords differ: z.
Coordinate-to-dimension mapping differs for cube.dim_coords.
"""
if not self:
raise ValueError("can't merge an empty CubeList")
# Register each of our cubes with a single ProtoCube.
proto_cube = iris._merge.ProtoCube(self[0])
for cube in self[1:]:
proto_cube.register(cube, error_on_mismatch=True)
# Extract the merged cube from the ProtoCube.
merged_cube, = proto_cube.merge()
return merged_cube
def merge(self, unique=True):
"""
Returns the :class:`CubeList` resulting from merging this
:class:`CubeList`.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
This combines cubes with different values of an auxiliary scalar
coordinate, by constructing a new dimension.
.. testsetup::
import iris
c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
c1.add_dim_coord(xco, 0)
c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
c2 = c1.copy()
c2.coord('y_vals').points = [200]
For example::
>>> print(c1)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 100
>>> print(c2)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 200
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.merge()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 2; x_vals: 3)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[100 200]
>>>
Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
cubes along an existing dimension.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be merged. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be merged.
"""
# Register each of our cubes with its appropriate ProtoCube.
proto_cubes_by_name = {}
for cube in self:
name = cube.standard_name
proto_cubes = proto_cubes_by_name.setdefault(name, [])
proto_cube = None
for target_proto_cube in proto_cubes:
if target_proto_cube.register(cube):
proto_cube = target_proto_cube
break
if proto_cube is None:
proto_cube = iris._merge.ProtoCube(cube)
proto_cubes.append(proto_cube)
# Emulate Python 2 behaviour.
def _none_sort(item):
return (item is not None, item)
# Extract all the merged cubes from the ProtoCubes.
merged_cubes = CubeList()
for name in sorted(proto_cubes_by_name, key=_none_sort):
for proto_cube in proto_cubes_by_name[name]:
merged_cubes.extend(proto_cube.merge(unique=unique))
return merged_cubes
def concatenate_cube(self, check_aux_coords=True):
"""
Return the concatenated contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to concatenate the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
describing the reason for the failure.
Kwargs:
* check_aux_coords
            Checks that the auxiliary coordinates of the cubes match. This check
            is not applied to auxiliary coordinates that span the dimension
            the concatenation is occurring along. Defaults to True.
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
if not self:
raise ValueError("can't concatenate an empty CubeList")
names = [cube.metadata.name() for cube in self]
unique_names = list(collections.OrderedDict.fromkeys(names))
if len(unique_names) == 1:
res = iris._concatenate.concatenate(
self, error_on_mismatch=True,
check_aux_coords=check_aux_coords)
n_res_cubes = len(res)
if n_res_cubes == 1:
return res[0]
else:
msgs = []
msgs.append('An unexpected problem prevented concatenation.')
msgs.append('Expected only a single cube, '
'found {}.'.format(n_res_cubes))
raise iris.exceptions.ConcatenateError(msgs)
else:
msgs = []
msgs.append('Cube names differ: {} != {}'.format(names[0],
names[1]))
raise iris.exceptions.ConcatenateError(msgs)
def concatenate(self, check_aux_coords=True):
"""
Concatenate the cubes over their common dimensions.
Kwargs:
* check_aux_coords
            Checks that the auxiliary coordinates of the cubes match. This check
            is not applied to auxiliary coordinates that span the dimension
            the concatenation is occurring along. Defaults to True.
Returns:
A new :class:`iris.cube.CubeList` of concatenated
:class:`iris.cube.Cube` instances.
This combines cubes with a common dimension coordinate, but occupying
different regions of the coordinate value. The cubes are joined across
that dimension.
.. testsetup::
import iris
import numpy as np
xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
c1.add_dim_coord(xco, 1)
c1.add_dim_coord(yco1, 0)
c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
c2.add_dim_coord(xco, 1)
c2.add_dim_coord(yco2, 0)
For example::
>>> print(c1)
some_parameter / (unknown) (y_vals: 2; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c1.coord('y_vals').points)
[4 5]
>>> print(c2)
some_parameter / (unknown) (y_vals: 3; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c2.coord('y_vals').points)
[ 7 9 10]
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.concatenate()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 5; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[ 4 5 7 9 10]
>>>
Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
dimension from values of an auxiliary scalar coordinate.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be concatenated. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be concatenated.
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
return iris._concatenate.concatenate(self,
check_aux_coords=check_aux_coords)
def _is_single_item(testee):
"""
Return whether this is a single item, rather than an iterable.
We count string types as 'single', also.
"""
return (isinstance(testee, six.string_types)
or not isinstance(testee, collections.Iterable))
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time: 1998-03-01 03:00:00
pressure: 1000.0 hPa
time: 1998-12-01 00:00:00, \
bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Attributes:
STASH: m01s16i203
source: Data from Met Office Unified Model
Cell methods:
mean within years: time
mean over years: time
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
#: is similar to Fortran or Matlab, but different than numpy.
__orthogonal_indexing__ = True
def __init__(self, data, standard_name=None, long_name=None,
var_name=None, units=None, attributes=None,
cell_methods=None, dim_coords_and_dims=None,
aux_coords_and_dims=None, aux_factories=None,
cell_measures_and_dims=None):
"""
Creates a cube with data and optional metadata.
Not typically used - normally cubes are obtained by loading data
(e.g. :func:`iris.load`) or from manipulating existing cubes.
Args:
* data
This object defines the shape of the cube and the phenomenon
value in each cell.
It can be a biggus array, a numpy array, a numpy array
subclass (such as :class:`numpy.ma.MaskedArray`), or an
*array_like* as described in :func:`numpy.asarray`.
See :attr:`Cube.data<iris.cube.Cube.data>`.
Kwargs:
* standard_name
The standard name for the Cube's data.
* long_name
An unconstrained description of the cube.
* var_name
The CF variable name for the cube.
* units
The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
* attributes
A dictionary of cube attributes
* cell_methods
A tuple of CellMethod objects, generally set by Iris, e.g.
``(CellMethod("mean", coords='latitude'), )``.
* dim_coords_and_dims
A list of coordinates with scalar dimension mappings, e.g
``[(lat_coord, 0), (lon_coord, 1)]``.
* aux_coords_and_dims
A list of coordinates with dimension mappings,
e.g ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
* aux_factories
A list of auxiliary coordinate factories. See
:mod:`iris.aux_factory`.
* cell_measures_and_dims
A list of CellMeasures with dimension mappings.
For example::
>>> from iris.coords import DimCoord
>>> from iris.cube import Cube
>>> latitude = DimCoord(np.linspace(-90, 90, 4),
... standard_name='latitude',
... units='degrees')
>>> longitude = DimCoord(np.linspace(45, 360, 8),
... standard_name='longitude',
... units='degrees')
>>> cube = Cube(np.zeros((4, 8), np.float32),
... dim_coords_and_dims=[(latitude, 0),
... (longitude, 1)])
"""
# Temporary error while we transition the API.
if isinstance(data, six.string_types):
raise TypeError('Invalid data type: {!r}.'.format(data))
if not isinstance(data, (biggus.Array, ma.MaskedArray)):
data = np.asarray(data)
self._my_data = data
#: The "standard name" for the Cube's phenomenon.
self.standard_name = standard_name
#: An instance of :class:`cf_units.Unit` describing the Cube's data.
self.units = units
#: The "long name" for the Cube's phenomenon.
self.long_name = long_name
#: The CF variable name for the Cube.
self.var_name = var_name
self.cell_methods = cell_methods
#: A dictionary, with a few restricted keys, for arbitrary
#: Cube metadata.
self.attributes = attributes
# Coords
self._dim_coords_and_dims = []
self._aux_coords_and_dims = []
self._aux_factories = []
# Cell Measures
self._cell_measures_and_dims = []
identities = set()
if dim_coords_and_dims:
dims = set()
for coord, dim in dim_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities and dim not in dims:
self._add_unique_dim_coord(coord, dim)
else:
self.add_dim_coord(coord, dim)
identities.add(identity)
dims.add(dim)
if aux_coords_and_dims:
for coord, dims in aux_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities:
self._add_unique_aux_coord(coord, dims)
else:
self.add_aux_coord(coord, dims)
identities.add(identity)
if aux_factories:
for factory in aux_factories:
self.add_aux_factory(factory)
if cell_measures_and_dims:
for cell_measure, dims in cell_measures_and_dims:
self.add_cell_measure(cell_measure, dims)
@property
def metadata(self):
"""
An instance of :class:`CubeMetadata` describing the phenomenon.
This property can be updated with any of:
- another :class:`CubeMetadata` instance,
- a tuple/dict which can be used to make a :class:`CubeMetadata`,
- or any object providing the attributes exposed by
:class:`CubeMetadata`.
"""
return CubeMetadata(self.standard_name, self.long_name, self.var_name,
self.units, self.attributes, self.cell_methods)
@metadata.setter
def metadata(self, value):
try:
value = CubeMetadata(**value)
except TypeError:
try:
value = CubeMetadata(*value)
except TypeError:
missing_attrs = [field for field in CubeMetadata._fields
if not hasattr(value, field)]
if missing_attrs:
raise TypeError('Invalid/incomplete metadata')
for name in CubeMetadata._fields:
setattr(self, name, getattr(value, name))
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
"""
compatible = (self.name() == other.name() and
self.units == other.units and
self.cell_methods == other.cell_methods)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, six.string_types):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
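    # Illustrative example (not from the original source): `cube_a` and
    # `cube_b` are hypothetical cubes; a differing 'history' attribute is
    # ignored when deciding compatibility.
    #
    #     if cube_a.is_compatible(cube_b, ignore='history'):
    #         result = cube_a + cube_b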
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
.. warning::
Calling this method will trigger any deferred loading, causing
the cube's data array to be loaded into memory.
"""
# If the cube has units convert the data.
if not self.units.is_unknown():
self.data = self.units.convert(self.data, unit)
self.units = unit
def add_cell_method(self, cell_method):
"""Add a CellMethod to the Cube."""
self.cell_methods += (cell_method, )
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError('Duplicate coordinates are not permitted.')
self._add_unique_aux_coord(coord, data_dims)
def _check_multi_dim_metadata(self, metadata, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, collections.Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != metadata.ndim:
msg = 'Invalid data dimensions: {} given, {} expected for ' \
'{!r}.'.format(len(data_dims), metadata.ndim,
metadata.name())
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if metadata.shape[i] != self.shape[dim]:
msg = 'Unequal lengths. Cube dimension {} => {};' \
' metadata {!r} dimension {} => {}.'
raise ValueError(msg.format(dim, self.shape[dim],
metadata.name(), i,
metadata.shape[i]))
elif metadata.shape != (1,):
msg = 'Missing data dimensions for multi-valued {} {!r}'
msg = msg.format(metadata.__class__.__name__, metadata.name())
raise ValueError(msg)
return data_dims
def _add_unique_aux_coord(self, coord, data_dims):
data_dims = self._check_multi_dim_metadata(coord, data_dims)
self._aux_coords_and_dims.append([coord, data_dims])
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError('Factory must be a subclass of '
'iris.aux_factory.AuxCoordFactory.')
self._aux_factories.append(aux_factory)
def add_cell_measure(self, cell_measure, data_dims=None):
"""
Adds a CF cell measure to the cube.
Args:
* cell_measure
The :class:`iris.coords.CellMeasure`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a cell_measure with identical metadata already
exists on the cube.
See also
:meth:`Cube.remove_cell_measure()<iris.cube.Cube.remove_cell_measure>`.
"""
if self.cell_measures(cell_measure):
raise ValueError('Duplicate cell_measures are not permitted.')
data_dims = self._check_multi_dim_metadata(cell_measure, data_dims)
self._cell_measures_and_dims.append([cell_measure, data_dims])
self._cell_measures_and_dims.sort(key=lambda cm_dims:
(cm_dims[0]._as_defn(), cm_dims[1]))
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(dim_coord):
raise ValueError('The coordinate already exists on the cube. '
'Duplicate coordinates are not permitted.')
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError('A dim_coord is already associated with '
'dimension %d.' % data_dim)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError('The dim_coord may not be an AuxCoord instance.')
# Convert data_dim to a single integer
if isinstance(data_dim, collections.Container):
if len(data_dim) != 1:
raise ValueError('The supplied data dimension must be a'
' single number.')
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError('The cube does not have the specified dimension '
'(%d)' % data_dim)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.'
raise ValueError(msg.format(data_dim, self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points)))
self._dim_coords_and_dims.append([dim_coord, int(data_dim)])
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in
self._dim_coords_and_dims if coord_
is not coord]
self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in
self._aux_coords_and_dims if coord_
is not coord]
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def remove_cell_measure(self, cell_measure):
"""
Removes a cell measure from the cube.
Args:
* cell_measure (CellMeasure)
The CellMeasure to remove from the cube.
See also
:meth:`Cube.add_cell_measure()<iris.cube.Cube.add_cell_measure>`
"""
self._cell_measures_and_dims = [[cell_measure_, dim] for cell_measure_,
dim in self._cell_measures_and_dims
if cell_measure_ is not cell_measure]
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
"""
coord = self.coord(coord)
# Search for existing coordinate (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_ is coord]
if not matches:
matches = [dims for coord_, dims in self._aux_coords_and_dims if
coord_ is coord]
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
match = lambda factory: factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
if not matches:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return matches[0]
def cell_measure_dims(self, cell_measure):
"""
Returns a tuple of the data dimensions relevant to the given
CellMeasure.
* cell_measure
The CellMeasure to look for.
"""
# Search for existing cell measure (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [dims for cm_, dims in self._cell_measures_and_dims if
cm_ is cell_measure]
if not matches:
raise iris.exceptions.CellMeasureNotFoundError(cell_measure.name())
return matches[0]
def aux_factory(self, name=None, standard_name=None, long_name=None,
var_name=None):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The CF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
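        For example (a sketch, assuming a hybrid-height cube with a derived
        'altitude' coordinate)::

            factory = cube.aux_factory(name='altitude')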
"""
factories = self.aux_factories
if name is not None:
factories = [factory for factory in factories if
factory.name() == name]
if standard_name is not None:
factories = [factory for factory in factories if
factory.standard_name == standard_name]
if long_name is not None:
factories = [factory for factory in factories if
factory.long_name == long_name]
if var_name is not None:
factories = [factory for factory in factories if
factory.var_name == var_name]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = 'Expected to find exactly one coordinate factory, but ' \
'found {}. They were: {}.'.format(len(factories),
', '.join(factory_names))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = 'Expected to find exactly one coordinate factory, but ' \
'found none.'
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
def coords(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a list of coordinates in this cube fitting the given criteria.
Kwargs:
* name_or_coord
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a coordinate instance with metadata equal to that of
the desired coordinates. Accepts either a
:class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`,
:class:`iris.aux_factory.AuxCoordFactory`
or :class:`iris.coords.CoordDefn`.
* name
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* standard_name
The CF standard name of the desired coordinate. If None, does not
check for standard name.
* long_name
An unconstrained description of the coordinate. If None, does not
check for long_name.
* var_name
The CF variable name of the desired coordinate. If None, does not
check for var_name.
* attributes
A dictionary of attributes desired on the coordinates. If None,
does not check for attributes.
* axis
The desired coordinate axis, see
:func:`iris.util.guess_coord_axis`. If None, does not check for
axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive).
* contains_dimension
The desired coordinate contains the data dimension. If None, does
not check for the dimension.
* dimensions
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty tuple or list
(i.e. ``()`` or ``[]``). If None, does not check for dimensions.
* coord
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* coord_system
Whether the desired coordinates have coordinate systems equal to
the given coordinate system. If None, no check is done.
* dim_coords
Set to True to only return coordinates that are the cube's
dimension coordinates. Set to False to only return coordinates
that are the cube's auxiliary and derived coordinates. If None,
returns all coordinates.
See also :meth:`Cube.coord()<iris.cube.Cube.coord>`.
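        For example (illustrative only)::

            cube.coords(axis='Y')          # all Y-axis coordinates
            cube.coords(dimensions=(0,))   # coords spanning dimension 0 only
            cube.coords(dim_coords=True)   # dimension coordinates only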
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warn_deprecated('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warn_deprecated('the coord kwarg is deprecated and will be '
'removed in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
name = None
coord = None
if isinstance(name_or_coord, six.string_types):
name = name_or_coord
else:
coord = name_or_coord
coords_and_factories = []
if dim_coords in [True, None]:
coords_and_factories += list(self.dim_coords)
if dim_coords in [False, None]:
coords_and_factories += list(self.aux_coords)
coords_and_factories += list(self.aux_factories)
if name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.name() == name]
if standard_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.standard_name == standard_name]
if long_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.long_name == long_name]
if var_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.var_name == var_name]
if axis is not None:
axis = axis.upper()
guess_axis = iris.util.guess_coord_axis
coords_and_factories = [coord_ for coord_ in coords_and_factories
if guess_axis(coord_) == axis]
if attributes is not None:
if not isinstance(attributes, collections.Mapping):
msg = 'The attributes keyword was expecting a dictionary ' \
'type, but got a %s instead.' % type(attributes)
raise ValueError(msg)
attr_filter = lambda coord_: all(k in coord_.attributes and
coord_.attributes[k] == v for
k, v in six.iteritems(attributes))
coords_and_factories = [coord_ for coord_ in coords_and_factories
if attr_filter(coord_)]
if coord_system is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.coord_system == coord_system]
if coord is not None:
if isinstance(coord, iris.coords.CoordDefn):
defn = coord
else:
defn = coord._as_defn()
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_._as_defn() == defn]
if contains_dimension is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if contains_dimension in
self.coord_dims(coord_)]
if dimensions is not None:
if not isinstance(dimensions, collections.Container):
dimensions = [dimensions]
dimensions = tuple(dimensions)
coords_and_factories = [coord_ for coord_ in coords_and_factories
if self.coord_dims(coord_) == dimensions]
# If any factories remain after the above filters we have to make the
# coords so they can be returned
def extract_coord(coord_or_factory):
if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
coord = coord_or_factory.make_coord(self.coord_dims)
elif isinstance(coord_or_factory, iris.coords.Coord):
coord = coord_or_factory
else:
msg = 'Expected Coord or AuxCoordFactory, got ' \
'{!r}.'.format(type(coord_or_factory))
raise ValueError(msg)
return coord
coords = [extract_coord(coord_or_factory) for coord_or_factory in
coords_and_factories]
return coords
def coord(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a single coord given the same arguments as :meth:`Cube.coords`.
.. note::
If the arguments given do not result in precisely 1 coordinate
being matched, an :class:`iris.exceptions.CoordinateNotFoundError`
is raised.
.. seealso::
:meth:`Cube.coords()<iris.cube.Cube.coords>` for full keyword
documentation.
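        For example (illustrative)::

            time_coord = cube.coord('time')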
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warn_deprecated('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warn_deprecated('the coord kwarg is deprecated and will be '
'removed in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
coords = self.coords(name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name, var_name=var_name,
attributes=attributes, axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
dim_coords=dim_coords)
if len(coords) > 1:
msg = 'Expected to find exactly 1 coordinate, but found %s. ' \
'They were: %s.' % (len(coords), ', '.join(coord.name() for
coord in coords))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(coords) == 0:
bad_name = name or standard_name or long_name or \
(coord and coord.name()) or ''
msg = 'Expected to find exactly 1 %s coordinate, but found ' \
'none.' % bad_name
raise iris.exceptions.CoordinateNotFoundError(msg)
return coords[0]
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
            The name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, six.string_types) or spec is None:
spec_name = spec
        else:
            if not issubclass(spec, iris.coord_systems.CoordSystem):
                msg = "type %s is not a subclass of CoordSystem" % spec
                raise TypeError(msg)
            spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(coord_systems.keys(),
key=lambda class_: class_.__name__):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
def cell_measures(self, name_or_cell_measure=None):
"""
Return a list of cell measures in this cube fitting the given criteria.
Kwargs:
* name_or_cell_measure
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a cell_measure instance with metadata equal to that of
the desired cell_measures.
See also :meth:`Cube.cell_measure()<iris.cube.Cube.cell_measure>`.
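        For example (illustrative, assuming a cell measure named
        'cell_area' is attached to the cube)::

            areas = cube.cell_measures('cell_area')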
"""
name = None
if isinstance(name_or_cell_measure, six.string_types):
name = name_or_cell_measure
else:
cell_measure = name_or_cell_measure
cell_measures = []
for cm, _ in self._cell_measures_and_dims:
if name is not None:
if cm.name() == name:
cell_measures.append(cm)
elif cell_measure is not None:
if cm == cell_measure:
cell_measures.append(cm)
else:
cell_measures.append(cm)
return cell_measures
def cell_measure(self, name_or_cell_measure=None):
"""
Return a single cell_measure given the same arguments as
:meth:`Cube.cell_measures`.
.. note::
If the arguments given do not result in precisely 1 cell_measure
being matched, an :class:`iris.exceptions.CellMeasureNotFoundError`
is raised.
.. seealso::
:meth:`Cube.cell_measures()<iris.cube.Cube.cell_measures>`
for full keyword documentation.
"""
cell_measures = self.cell_measures(name_or_cell_measure)
if len(cell_measures) > 1:
msg = ('Expected to find exactly 1 cell_measure, but found {}. '
'They were: {}.')
msg = msg.format(len(cell_measures),
', '.join(cm.name() for cm in cell_measures))
raise iris.exceptions.CellMeasureNotFoundError(msg)
elif len(cell_measures) == 0:
if isinstance(name_or_cell_measure, six.string_types):
bad_name = name_or_cell_measure
else:
bad_name = (name_or_cell_measure and
name_or_cell_measure.name()) or ''
msg = 'Expected to find exactly 1 %s cell_measure, but found ' \
'none.' % bad_name
raise iris.exceptions.CellMeasureNotFoundError(msg)
return cell_measures[0]
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._cell_methods = tuple(cell_methods) if cell_methods else tuple()
@property
def shape(self):
"""The shape of the data of this cube."""
shape = self.lazy_data().shape
return shape
@property
def dtype(self):
"""The :class:`numpy.dtype` of the data of this cube."""
return self.lazy_data().dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return len(self.shape)
def lazy_data(self, array=None):
"""
Return a :class:`biggus.Array` representing the
multi-dimensional data of the Cube, and optionally provide a
new array of values.
Accessing this method will never cause the data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube to have loaded data.
If the data have already been loaded for the Cube, the returned
Array will be a :class:`biggus.NumpyArrayAdapter` which wraps
the numpy array from `self.data`.
Kwargs:
* array (:class:`biggus.Array` or None):
When this is not None it sets the multi-dimensional data of
the cube to the given value.
Returns:
A :class:`biggus.Array` representing the multi-dimensional
data of the Cube.
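        For example (a sketch; neither line triggers loading of the data)::

            lazy = cube.lazy_data()
            print(lazy.shape, lazy.dtype)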
"""
if array is not None:
if not isinstance(array, biggus.Array):
raise TypeError('new values must be a biggus.Array')
if self.shape != array.shape:
# The _ONLY_ data reshape permitted is converting a
# 0-dimensional array into a 1-dimensional array of
# length one.
                # i.e. self.shape == () and array.shape == (1,)
if self.shape or array.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, array.shape))
self._my_data = array
else:
array = self._my_data
if not isinstance(array, biggus.Array):
array = biggus.NumpyArrayAdapter(array)
return array
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multi-dimensional data of
the cube.
.. note::
Cubes obtained from netCDF, PP, and FieldsFile files will only
populate this attribute on its first use.
To obtain the shape of the data without causing it to be loaded,
use the Cube.shape attribute.
Example::
>>> fname = iris.sample_data_path('air_temp.pp')
>>> cube = iris.load_cube(fname, 'air_temperature')
>>> # cube.data does not yet have a value.
...
>>> print(cube.shape)
(73, 96)
>>> # cube.data still does not have a value.
...
>>> cube = cube[:10, :20]
>>> # cube.data still does not have a value.
...
>>> data = cube.data
>>> # Only now is the data loaded.
...
>>> print(data.shape)
(10, 20)
"""
data = self._my_data
if not isinstance(data, np.ndarray):
try:
data = data.masked_array()
except MemoryError:
msg = "Failed to create the cube's data as there was not" \
" enough memory available.\n" \
"The array shape would have been {0!r} and the data" \
" type {1}.\n" \
"Consider freeing up variables or indexing the cube" \
" before getting its data."
msg = msg.format(self.shape, data.dtype)
raise MemoryError(msg)
# Unmask the array only if it is filled.
if isinstance(data, np.ndarray) and ma.count_masked(data) == 0:
data = data.data
# data may be a numeric type, so ensure an np.ndarray is returned
self._my_data = np.asanyarray(data)
return self._my_data
@data.setter
def data(self, value):
data = np.asanyarray(value)
if self.shape != data.shape:
# The _ONLY_ data reshape permitted is converting a 0-dimensional
# array i.e. self.shape == () into a 1-dimensional array of length
# one i.e. data.shape == (1,)
if self.shape or data.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, data.shape))
self._my_data = data
def has_lazy_data(self):
return isinstance(self._my_data, biggus.Array)
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
"""
return tuple((coord for coord, dim in
sorted(self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple((coord for coord, dims in
sorted(self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(factory.make_coord(self.coord_dims) for factory in
sorted(self.aux_factories,
key=lambda factory: factory.name()))
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
def _summary_coord_extra(self, coord, indent):
# Returns the text needed to ensure this coordinate can be
# distinguished from all others with the same name.
extra = ''
similar_coords = self.coords(coord.name())
if len(similar_coords) > 1:
# Find all the attribute keys
keys = set()
for similar_coord in similar_coords:
keys.update(six.iterkeys(similar_coord.attributes))
# Look for any attributes that vary
vary = set()
attributes = {}
for key in keys:
for similar_coord in similar_coords:
if key not in similar_coord.attributes:
vary.add(key)
break
value = similar_coord.attributes[key]
if attributes.setdefault(key, value) != value:
vary.add(key)
break
keys = sorted(vary & set(coord.attributes.keys()))
bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in
keys]
if bits:
extra = indent + ', '.join(bits)
return extra
def _summary_extra(self, coords, summary, indent):
# Where necessary, inserts extra lines into the summary to ensure
# coordinates can be distinguished.
new_summary = []
for coord, summary in zip(coords, summary):
new_summary.append(summary)
extra = self._summary_coord_extra(coord, indent)
if extra:
new_summary.append(extra)
return new_summary
def summary(self, shorten=False, name_padding=35):
"""
        Unicode string summary of the Cube, giving its name, a list of dim
        coord names versus length and, optionally, relevant coordinate
        information.
"""
# Create a set to contain the axis names for each data dimension.
dim_names = [set() for dim in range(len(self.shape))]
# Add the dim_coord names that participate in the associated data
# dimensions.
for dim in range(len(self.shape)):
dim_coords = self.coords(contains_dimension=dim, dim_coords=True)
if dim_coords:
dim_names[dim].add(dim_coords[0].name())
else:
dim_names[dim].add('-- ')
# Convert axes sets to lists and sort.
dim_names = [sorted(names, key=sorted_axes) for names in dim_names]
# Generate textual summary of the cube dimensionality.
if self.shape == ():
dimension_header = 'scalar cube'
else:
dimension_header = '; '.join(
[', '.join(dim_names[dim]) +
': %d' % dim_shape for dim, dim_shape in
enumerate(self.shape)])
nameunit = '{name} / ({units})'.format(name=self.name(),
units=self.units)
cube_header = '{nameunit!s:{length}} ({dimension})'.format(
length=name_padding,
nameunit=nameunit,
dimension=dimension_header)
summary = ''
# Generate full cube textual summary.
if not shorten:
indent = 10
extra_indent = ' ' * 13
# Cache the derived coords so we can rely on consistent
# object IDs.
derived_coords = self.derived_coords
# Determine the cube coordinates that are scalar (single-valued)
# AND non-dimensioned.
dim_coords = self.dim_coords
aux_coords = self.aux_coords
all_coords = dim_coords + aux_coords + derived_coords
scalar_coords = [coord for coord in all_coords if not
self.coord_dims(coord) and coord.shape == (1,)]
# Determine the cube coordinates that are not scalar BUT
# dimensioned.
scalar_coord_ids = set(map(id, scalar_coords))
vector_dim_coords = [coord for coord in dim_coords if id(coord) not
in scalar_coord_ids]
vector_aux_coords = [coord for coord in aux_coords if id(coord) not
in scalar_coord_ids]
vector_derived_coords = [coord for coord in derived_coords if
id(coord) not in scalar_coord_ids]
# cell measures
vector_cell_measures = [cm for cm in self.cell_measures()
if cm.shape != (1,)]
# Determine the cube coordinates that don't describe the cube and
# are most likely erroneous.
vector_coords = vector_dim_coords + vector_aux_coords + \
vector_derived_coords
ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords)))
invalid_coords = [coord for coord in all_coords if id(coord) not
in ok_coord_ids]
# Sort scalar coordinates by name.
scalar_coords.sort(key=lambda coord: coord.name())
# Sort vector coordinates by data dimension and name.
vector_dim_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_aux_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_derived_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
# Sort other coordinates by name.
invalid_coords.sort(key=lambda coord: coord.name())
#
# Generate textual summary of cube vector coordinates.
#
def vector_summary(vector_coords, cube_header, max_line_offset,
cell_measures=None):
"""
Generates a list of suitably aligned strings containing coord
names and dimensions indicated by one or more 'x' symbols.
.. note::
The function may need to update the cube header so this is
returned with the list of strings.
"""
if cell_measures is None:
cell_measures = []
vector_summary = []
vectors = []
# Identify offsets for each dimension text marker.
alignment = np.array([index for index, value in
enumerate(cube_header) if
value == ':'])
# Generate basic textual summary for each vector coordinate
# - WITHOUT dimension markers.
for coord in vector_coords + cell_measures:
vector_summary.append('%*s%s' % (
indent, ' ', iris.util.clip_string(coord.name())))
min_alignment = min(alignment)
# Determine whether the cube header requires realignment
# due to one or more longer vector coordinate summaries.
if max_line_offset >= min_alignment:
delta = max_line_offset - min_alignment + 5
                    cube_header = '{nameunit!s:{length}} ({dimension})'.format(
                        length=int(name_padding + delta),
                        nameunit=nameunit,
                        dimension=dimension_header)
alignment += delta
if vector_coords:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(vector_coords):
dims = self.coord_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + vector_coords
if cell_measures:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(cell_measures):
dims = self.cell_measure_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + cell_measures
# Interleave any extra lines that are needed to distinguish
# the coordinates.
vector_summary = self._summary_extra(vectors,
vector_summary,
extra_indent)
return vector_summary, cube_header
# Calculate the maximum line offset.
max_line_offset = 0
for coord in all_coords:
max_line_offset = max(max_line_offset, len('%*s%s' % (
indent, ' ', iris.util.clip_string(str(coord.name())))))
if vector_dim_coords:
dim_coord_summary, cube_header = vector_summary(
vector_dim_coords, cube_header, max_line_offset)
summary += '\n Dimension coordinates:\n' + \
'\n'.join(dim_coord_summary)
if vector_aux_coords:
aux_coord_summary, cube_header = vector_summary(
vector_aux_coords, cube_header, max_line_offset)
summary += '\n Auxiliary coordinates:\n' + \
'\n'.join(aux_coord_summary)
if vector_derived_coords:
derived_coord_summary, cube_header = vector_summary(
vector_derived_coords, cube_header, max_line_offset)
summary += '\n Derived coordinates:\n' + \
'\n'.join(derived_coord_summary)
#
# Generate summary of cube cell measures attribute
#
if vector_cell_measures:
cell_measure_summary, cube_header = vector_summary(
[], cube_header, max_line_offset,
cell_measures=vector_cell_measures)
summary += '\n Cell Measures:\n'
summary += '\n'.join(cell_measure_summary)
#
# Generate textual summary of cube scalar coordinates.
#
scalar_summary = []
if scalar_coords:
for coord in scalar_coords:
if (coord.units in ['1', 'no_unit', 'unknown'] or
coord.units.is_time_reference()):
unit = ''
else:
unit = ' {!s}'.format(coord.units)
# Format cell depending on type of point and whether it
# has a bound
with iris.FUTURE.context(cell_datetime_objects=False):
coord_cell = coord.cell(0)
if isinstance(coord_cell.point, six.string_types):
# Indent string type coordinates
coord_cell_split = [iris.util.clip_string(str(item))
for item in
coord_cell.point.split('\n')]
line_sep = '\n{pad:{width}}'.format(
pad=' ', width=indent + len(coord.name()) + 2)
coord_cell_str = line_sep.join(coord_cell_split) + unit
else:
# Human readable times
if coord.units.is_time_reference():
coord_cell_cpoint = coord.units.num2date(
coord_cell.point)
if coord_cell.bound is not None:
coord_cell_cbound = coord.units.num2date(
coord_cell.bound)
else:
coord_cell_cpoint = coord_cell.point
coord_cell_cbound = coord_cell.bound
coord_cell_str = '{!s}{}'.format(coord_cell_cpoint,
unit)
if coord_cell.bound is not None:
bound = '({})'.format(', '.join(str(val) for
val in coord_cell_cbound))
coord_cell_str += ', bound={}{}'.format(bound,
unit)
scalar_summary.append('{pad:{width}}{name}: {cell}'.format(
pad=' ', width=indent, name=coord.name(),
cell=coord_cell_str))
# Interleave any extra lines that are needed to distinguish
# the coordinates.
scalar_summary = self._summary_extra(scalar_coords,
scalar_summary,
extra_indent)
summary += '\n Scalar coordinates:\n' + '\n'.join(
scalar_summary)
#
# Generate summary of cube's invalid coordinates.
#
if invalid_coords:
invalid_summary = []
for coord in invalid_coords:
invalid_summary.append(
'%*s%s' % (indent, ' ', coord.name()))
# Interleave any extra lines that are needed to distinguish the
# coordinates.
invalid_summary = self._summary_extra(
invalid_coords, invalid_summary, extra_indent)
summary += '\n Invalid coordinates:\n' + \
'\n'.join(invalid_summary)
# cell measures
scalar_cell_measures = [cm for cm in self.cell_measures()
if cm.shape == (1,)]
if scalar_cell_measures:
summary += '\n Scalar cell measures:\n'
scalar_cms = [' {}'.format(cm.name())
for cm in scalar_cell_measures]
summary += '\n'.join(scalar_cms)
#
# Generate summary of cube attributes.
#
if self.attributes:
attribute_lines = []
for name, value in sorted(six.iteritems(self.attributes)):
value = iris.util.clip_string(six.text_type(value))
line = u'{pad:{width}}{name}: {value}'.format(pad=' ',
width=indent,
name=name,
value=value)
attribute_lines.append(line)
summary += '\n Attributes:\n' + '\n'.join(attribute_lines)
#
# Generate summary of cube cell methods
#
if self.cell_methods:
summary += '\n Cell methods:\n'
cm_lines = []
for cm in self.cell_methods:
cm_lines.append('%*s%s' % (indent, ' ', str(cm)))
summary += '\n'.join(cm_lines)
# Construct the final cube summary.
summary = cube_header + summary
return summary
def assert_valid(self):
"""
Does nothing and returns None.
.. deprecated:: 0.8
"""
warn_deprecated('Cube.assert_valid() has been deprecated.')
def __str__(self):
# six has a decorator for this bit, but it doesn't do errors='replace'.
if six.PY3:
return self.summary()
else:
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
def __repr__(self):
return "<iris 'Cube' of %s>" % self.summary(shorten=True,
name_padding=1)
def __iter__(self):
raise TypeError('Cube is not iterable')
def __getitem__(self, keys):
"""
Cube indexing (through use of square bracket notation) has been
implemented at the data level. That is, the indices provided to this
method should be aligned to the data of the cube, and thus the indices
requested must be applicable directly to the cube.data attribute. All
metadata will be subsequently indexed appropriately.
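        For example (illustrative)::

            first_field = cube[0]      # first slice along dimension 0
            corner = cube[..., :10]    # first ten columns of the last dim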
"""
# turn the keys into a full slice spec (all dims)
full_slice = iris.util._build_full_slice_given_keys(keys,
len(self.shape))
# make indexing on the cube column based by using the
# column_slices_generator (potentially requires slicing the data
# multiple times)
dimension_mapping, slice_gen = iris.util.column_slices_generator(
full_slice, len(self.shape))
new_coord_dims = lambda coord_: [dimension_mapping[d] for d in
self.coord_dims(coord_) if
dimension_mapping[d] is not None]
new_cell_measure_dims = lambda cm_: [dimension_mapping[d] for d in
self.cell_measure_dims(cm_) if
dimension_mapping[d] is not None]
try:
first_slice = next(slice_gen)
except StopIteration:
first_slice = None
if first_slice is not None:
data = self._my_data[first_slice]
else:
data = copy.deepcopy(self._my_data)
for other_slice in slice_gen:
data = data[other_slice]
# We don't want a view of the data, so take a copy of it if it's
# not already our own.
if isinstance(data, biggus.Array) or not data.flags['OWNDATA']:
data = copy.deepcopy(data)
# We can turn a masked array into a normal array if it's full.
if isinstance(data, ma.core.MaskedArray):
if ma.count_masked(data) == 0:
data = data.filled()
# Make the new cube slice
cube = Cube(data)
cube.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
# Slice the coords
for coord in self.aux_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
try:
new_coord = coord[coord_keys]
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_coord_dims(coord))
coord_mapping[id(coord)] = new_coord
for coord in self.dim_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
new_dims = new_coord_dims(coord)
# Try/Catch to handle slicing that makes the points/bounds
# non-monotonic
try:
new_coord = coord[coord_keys]
if not new_dims:
# If the associated dimension has been sliced so the coord
# is a scalar move the coord to the aux_coords container
cube.add_aux_coord(new_coord, new_dims)
else:
cube.add_dim_coord(new_coord, new_dims)
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_dims)
coord_mapping[id(coord)] = new_coord
for factory in self.aux_factories:
cube.add_aux_factory(factory.updated(coord_mapping))
# slice the cell measures and add them to the cube
for cellmeasure in self.cell_measures():
dims = self.cell_measure_dims(cellmeasure)
cm_keys = tuple([full_slice[dim] for dim in dims])
new_cm = cellmeasure[cm_keys]
cube.add_cell_measure(new_cm,
new_cell_measure_dims(cellmeasure))
return cube
def subset(self, coord):
"""
Get a subset of the cube by providing the desired resultant
        coordinate. If the coordinate provided applies to the whole cube, the
        whole cube is returned. As such, the operation is not strict.
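        For example (a sketch; ``height_coord`` stands for a 1D coordinate
        comparable with one on the cube)::

            sub_cube = cube.subset(height_coord)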
"""
if not isinstance(coord, iris.coords.Coord):
raise ValueError('coord_to_extract must be a valid Coord.')
# Get the coord to extract from the cube
coord_to_extract = self.coord(coord)
# If scalar, return the whole cube. Not possible to subset 1 point.
if coord_to_extract in self.aux_coords and\
len(coord_to_extract.points) == 1:
# Default to returning None
result = None
indices = coord_to_extract.intersect(coord, return_indices=True)
# If there is an intersect between the two scalar coordinates;
# return the whole cube. Else, return None.
if len(indices):
result = self
else:
if len(self.coord_dims(coord_to_extract)) > 1:
msg = "Currently, only 1D coords can be used to subset a cube"
raise iris.exceptions.CoordinateMultiDimError(msg)
# Identify the dimension of the cube which this coordinate
# references
coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
# Identify the indices which intersect the requested coord and
# coord_to_extract
coord_indices = coord_to_extract.intersect(coord,
return_indices=True)
# Build up a slice which spans the whole of the cube
full_slice = [slice(None, None)] * len(self.shape)
# Update the full slice to only extract specific indices which
# were identified above
full_slice[coord_to_extract_dim] = coord_indices
full_slice = tuple(full_slice)
result = self[full_slice]
return result
def extract(self, constraint):
"""
Filter the cube by the given constraint using
:meth:`iris.Constraint.extract` method.
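        For example (illustrative)::

            result = cube.extract(iris.Constraint(longitude=0))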
"""
# Cast the constraint into a proper constraint if it is not so already
constraint = iris._constraints.as_constraint(constraint)
return constraint.extract(self)
def intersection(self, *args, **kwargs):
"""
Return the intersection of the cube with specified coordinate
ranges.
Coordinate ranges can be specified as:
(a) instances of :class:`iris.coords.CoordExtent`.
(b) keyword arguments, where the keyword name specifies the name
of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`)
and the value defines the corresponding range of coordinate
values as a tuple. The tuple must contain two, three, or four
items corresponding to: (minimum, maximum, min_inclusive,
max_inclusive). Where the items are defined as:
* minimum
The minimum value of the range to select.
* maximum
The maximum value of the range to select.
* min_inclusive
If True, coordinate values equal to `minimum` will be included
in the selection. Default is True.
* max_inclusive
If True, coordinate values equal to `maximum` will be included
in the selection. Default is True.
To perform an intersection that ignores any bounds on the coordinates,
set the optional keyword argument *ignore_bounds* to True. Defaults to
False.
.. note::
For ranges defined over "circular" coordinates (i.e. those
where the `units` attribute has a modulus defined) the cube
will be "rolled" to fit where neccesary.
.. warning::
Currently this routine only works with "circular"
            coordinates (as defined in the previous note).
For example::
>>> import iris
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube.coord('longitude').points[::10])
[ 0. 37.49999237 74.99998474 112.49996948 \
149.99996948
187.49995422 224.99993896 262.49993896 299.99993896 \
337.49990845]
>>> subset = cube.intersection(longitude=(30, 50))
>>> print(subset.coord('longitude').points)
[ 33.74999237 37.49999237 41.24998856 44.99998856 48.74998856]
>>> subset = cube.intersection(longitude=(-10, 10))
>>> print(subset.coord('longitude').points)
[-7.50012207 -3.75012207 0. 3.75 7.5 ]
Returns:
A new :class:`~iris.cube.Cube` giving the subset of the cube
which intersects with the requested coordinate intervals.
"""
result = self
ignore_bounds = kwargs.pop('ignore_bounds', False)
for arg in args:
result = result._intersect(*arg, ignore_bounds=ignore_bounds)
for name, value in six.iteritems(kwargs):
result = result._intersect(name, *value,
ignore_bounds=ignore_bounds)
return result
def _intersect(self, name_or_coord, minimum, maximum,
min_inclusive=True, max_inclusive=True,
ignore_bounds=False):
coord = self.coord(name_or_coord)
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if coord.nbounds not in (0, 2):
raise ValueError('expected 0 or 2 bound values per cell')
if minimum > maximum:
raise ValueError('minimum greater than maximum')
modulus = coord.units.modulus
if modulus is None:
raise ValueError('coordinate units with no modulus are not yet'
' supported')
subsets, points, bounds = self._intersect_modulus(coord,
minimum, maximum,
min_inclusive,
max_inclusive,
ignore_bounds)
# By this point we have either one or two subsets along the relevant
# dimension. If it's just one subset (which might be a slice or an
# unordered collection of indices) we can simply index the cube
# and we're done. If it's two subsets we need to stitch the two
# pieces together.
        # `subsets` provides a way of slicing the coordinates to ensure
        # that they remain contiguous. In doing so, the data may need to be
        # transformed (i.e. the two separate pieces stitched together).
def make_chunk(key):
chunk = self[key_tuple_prefix + (key,)]
chunk_coord = chunk.coord(coord)
chunk_coord.points = points[(key,)]
if chunk_coord.has_bounds():
chunk_coord.bounds = bounds[(key,)]
return chunk
dim, = self.coord_dims(coord)
key_tuple_prefix = (slice(None),) * dim
chunks = [make_chunk(key) for key in subsets]
if len(chunks) == 1:
result = chunks[0]
else:
if self.has_lazy_data():
data = biggus.LinearMosaic([chunk.lazy_data()
for chunk in chunks],
dim)
else:
module = ma if ma.isMaskedArray(self.data) else np
data = module.concatenate([chunk.data for chunk in chunks],
dim)
result = iris.cube.Cube(data)
result.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def create_coords(src_coords, add_coord):
# Add copies of the source coordinates, selecting
# the appropriate subsets out of coordinates which
# share the intersection dimension.
preserve_circular = (min_inclusive and max_inclusive and
abs(maximum - minimum) == modulus)
for src_coord in src_coords:
dims = self.coord_dims(src_coord)
if dim in dims:
dim_within_coord = dims.index(dim)
points = np.concatenate([chunk.coord(src_coord).points
for chunk in chunks],
dim_within_coord)
if src_coord.has_bounds():
bounds = np.concatenate(
[chunk.coord(src_coord).bounds
for chunk in chunks],
dim_within_coord)
else:
bounds = None
result_coord = src_coord.copy(points=points,
bounds=bounds)
circular = getattr(result_coord, 'circular', False)
if circular and not preserve_circular:
result_coord.circular = False
else:
result_coord = src_coord.copy()
add_coord(result_coord, dims)
coord_mapping[id(src_coord)] = result_coord
create_coords(self.dim_coords, result.add_dim_coord)
create_coords(self.aux_coords, result.add_aux_coord)
for factory in self.aux_factories:
result.add_aux_factory(factory.updated(coord_mapping))
return result
def _intersect_derive_subset(self, coord, points, bounds, inside_indices):
# Return the subsets, i.e. the means to allow the slicing of
# coordinates to ensure that they remain contiguous.
modulus = coord.units.modulus
delta = coord.points[inside_indices] - points[inside_indices]
step = np.rint(np.diff(delta) / modulus)
non_zero_step_indices = np.nonzero(step)[0]
def dim_coord_subset():
"""
Derive the subset for dimension coordinates.
Ensure that we do not wrap if blocks are at the very edge. That
is, if the very edge is wrapped and corresponds to base + period,
stop this unnecessary wraparound.
"""
# A contiguous block at the start and another at the end.
# (NB. We can't have more than two blocks because we've already
# restricted the coordinate's range to its modulus).
end_of_first_chunk = non_zero_step_indices[0]
index_of_second_chunk = inside_indices[end_of_first_chunk + 1]
final_index = points.size - 1
# Condition1: The two blocks don't themselves wrap
# (inside_indices is contiguous).
# Condition2: Are we chunked at either extreme edge.
edge_wrap = ((index_of_second_chunk ==
inside_indices[end_of_first_chunk] + 1) and
index_of_second_chunk in (final_index, 1))
subsets = None
if edge_wrap:
# Increasing coord
if coord.points[-1] > coord.points[0]:
index_end = -1
index_start = 0
# Decreasing coord
else:
index_end = 0
index_start = -1
# Unwrap points and bounds (if present and equal base + period)
if bounds is not None:
edge_equal_base_period = (
np.isclose(coord.bounds[index_end, index_end],
coord.bounds[index_start, index_start] +
modulus))
if edge_equal_base_period:
bounds[index_end, :] = coord.bounds[index_end, :]
else:
edge_equal_base_period = (
np.isclose(coord.points[index_end],
coord.points[index_start] +
modulus))
if edge_equal_base_period:
points[index_end] = coord.points[index_end]
subsets = [slice(inside_indices[0],
inside_indices[-1] + 1)]
# Either no edge wrap or edge wrap != base + period
# i.e. derive subset without alteration
if subsets is None:
subsets = [
slice(index_of_second_chunk, None),
slice(None, inside_indices[end_of_first_chunk] + 1)
]
return subsets
if isinstance(coord, iris.coords.DimCoord):
if non_zero_step_indices.size:
subsets = dim_coord_subset()
else:
# A single, contiguous block.
subsets = [slice(inside_indices[0], inside_indices[-1] + 1)]
else:
# An AuxCoord could have its values in an arbitrary
# order, and hence a range of values can select an
# arbitrary subset. Also, we want to preserve the order
# from the original AuxCoord. So we just use the indices
# directly.
subsets = [inside_indices]
return subsets
def _intersect_modulus(self, coord, minimum, maximum, min_inclusive,
max_inclusive, ignore_bounds):
modulus = coord.units.modulus
if maximum > minimum + modulus:
raise ValueError("requested range greater than coordinate's"
" unit's modulus")
if coord.has_bounds():
values = coord.bounds
else:
values = coord.points
if values.max() > values.min() + modulus:
raise ValueError("coordinate's range greater than coordinate's"
" unit's modulus")
min_comp = np.less_equal if min_inclusive else np.less
max_comp = np.less_equal if max_inclusive else np.less
if coord.has_bounds():
bounds = wrap_lons(coord.bounds, minimum, modulus)
if ignore_bounds:
points = wrap_lons(coord.points, minimum, modulus)
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
else:
inside = np.logical_and(min_comp(minimum, bounds),
max_comp(bounds, maximum))
inside_indices, = np.where(np.any(inside, axis=1))
# To ensure that bounds (and points) of matching cells aren't
# "scrambled" by the wrap operation we detect split cells that
# straddle the wrap point and choose a new wrap point which avoids
# split cells.
# For example: the cell [349.875, 350.4375] wrapped at -10 would
            # become [349.875, -9.5625], which is no longer valid. The lower
            # cell bound value (and possibly the associated point) is
            # recalculated so that it is consistent with the extended
            # wrapping scheme, which moves the wrap point to the correct lower
            # bound value (-10.125), thus resulting in the cell no longer
# being split. For bounds which may extend exactly the length of
# the modulus, we simply preserve the point to bound difference,
# and call the new bounds = the new points + the difference.
pre_wrap_delta = np.diff(coord.bounds[inside_indices])
post_wrap_delta = np.diff(bounds[inside_indices])
close_enough = np.allclose(pre_wrap_delta, post_wrap_delta)
if not close_enough:
split_cell_indices, _ = np.where(pre_wrap_delta !=
post_wrap_delta)
# Recalculate the extended minimum.
indices = inside_indices[split_cell_indices]
cells = bounds[indices]
cells_delta = np.diff(coord.bounds[indices])
# Watch out for ascending/descending bounds
if cells_delta[0, 0] > 0:
cells[:, 0] = cells[:, 1] - cells_delta[:, 0]
minimum = np.min(cells[:, 0])
else:
cells[:, 1] = cells[:, 0] + cells_delta[:, 0]
minimum = np.min(cells[:, 1])
points = wrap_lons(coord.points, minimum, modulus)
bound_diffs = coord.points[:, np.newaxis] - coord.bounds
bounds = points[:, np.newaxis] - bound_diffs
else:
points = wrap_lons(coord.points, minimum, modulus)
bounds = None
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
# Determine the subsets
subsets = self._intersect_derive_subset(coord, points, bounds,
inside_indices)
return subsets, points, bounds
def _as_list_of_coords(self, names_or_coords):
"""
Convert a name, coord, or list of names/coords to a list of coords.
"""
# If not iterable, convert to list of a single item
if _is_single_item(names_or_coords):
names_or_coords = [names_or_coords]
coords = []
for name_or_coord in names_or_coords:
if (isinstance(name_or_coord, six.string_types) or
isinstance(name_or_coord, iris.coords.Coord)):
coords.append(self.coord(name_or_coord))
else:
# Don't know how to handle this type
msg = "Don't know how to handle coordinate of type %s. " \
"Ensure all coordinates are of type six.string_types or " \
"iris.coords.Coord." % type(name_or_coord)
raise TypeError(msg)
return coords
def slices_over(self, ref_to_slice):
"""
Return an iterator of all subcubes along a given coordinate or
dimension index, or multiple of these.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be iterated along (i.e. the
dimensions that are not returned in the subcubes).
A mix of input types can also be provided.
Returns:
An iterator of subcubes.
For example, to get all subcubes along the time dimension::
for sub_cube in cube.slices_over('time'):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices`.
.. note::
The order of dimension references to slice along does not affect
the order of returned items in the iterator; instead the ordering
is based on the fastest-changing dimension.
"""
# Required to handle a mix between types.
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
slice_dims = set()
for ref in ref_to_slice:
try:
coord, = self._as_list_of_coords(ref)
except TypeError:
dim = int(ref)
                if dim < 0 or dim >= self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
# Convert coord index to a single-element list to prevent a
# TypeError when `slice_dims.update` is called with it.
dims = [dim]
else:
dims = self.coord_dims(coord)
slice_dims.update(dims)
all_dims = set(range(self.ndim))
opposite_dims = list(all_dims - slice_dims)
return self.slices(opposite_dims, ordered=False)
def slices(self, ref_to_slice, ordered=True):
"""
Return an iterator of all subcubes given the coordinates or dimension
indices desired to be present in each subcube.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be returned in the subcubes (i.e.
the dimensions that are not iterated over).
A mix of input types can also be provided. They must all be
orthogonal (i.e. point to different dimensions).
Kwargs:
* ordered: if True, the order which the coords to slice or data_dims
are given will be the order in which they represent the data in
the resulting cube slices. If False, the order will follow that of
the source cube. Default is True.
Returns:
An iterator of subcubes.
For example, to get all 2d longitude/latitude subcubes from a
multi-dimensional cube::
for sub_cube in cube.slices(['longitude', 'latitude']):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices_over`.
"""
if not isinstance(ordered, bool):
raise TypeError("'ordered' argument to slices must be boolean.")
# Required to handle a mix between types
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
dim_to_slice = []
for ref in ref_to_slice:
try:
# attempt to handle as coordinate
coord = self._as_list_of_coords(ref)[0]
dims = self.coord_dims(coord)
if not dims:
msg = ('Requested an iterator over a coordinate ({}) '
'which does not describe a dimension.')
msg = msg.format(coord.name())
raise ValueError(msg)
dim_to_slice.extend(dims)
except TypeError:
try:
# attempt to handle as dimension index
dim = int(ref)
except ValueError:
raise ValueError('{} Incompatible type {} for '
'slicing'.format(ref, type(ref)))
                if dim < 0 or dim >= self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
dim_to_slice.append(dim)
if len(set(dim_to_slice)) != len(dim_to_slice):
msg = 'The requested coordinates are not orthogonal.'
raise ValueError(msg)
        # Create a list holding the shape of our data
dims_index = list(self.shape)
# Set the dimensions which have been requested to length 1
for d in dim_to_slice:
dims_index[d] = 1
return _SliceIterator(self, dims_index, dim_to_slice, ordered)
def transpose(self, new_order=None):
"""
Re-order the data dimensions of the cube in-place.
new_order - list of ints, optional
By default, reverse the dimensions, otherwise permute the
axes according to the values given.
.. note:: If defined, new_order must span all of the data dimensions.
Example usage::
            # Put the second dimension first, followed by the third dimension,
            # and finally put the first dimension third.
            cube.transpose([1, 2, 0])
"""
if new_order is None:
new_order = np.arange(self.data.ndim)[::-1]
elif len(new_order) != self.data.ndim:
raise ValueError('Incorrect number of dimensions.')
# The data needs to be copied, otherwise this view of the transposed
# data will not be contiguous. Ensure not to assign via the cube.data
# setter property since we are reshaping the cube payload in-place.
self._my_data = np.transpose(self.data, new_order).copy()
dim_mapping = {src: dest for dest, src in enumerate(new_order)}
def remap_dim_coord(coord_and_dim):
coord, dim = coord_and_dim
return coord, dim_mapping[dim]
self._dim_coords_and_dims = list(map(remap_dim_coord,
self._dim_coords_and_dims))
def remap_aux_coord(coord_and_dims):
coord, dims = coord_and_dims
return coord, tuple(dim_mapping[dim] for dim in dims)
self._aux_coords_and_dims = list(map(remap_aux_coord,
self._aux_coords_and_dims))
def xml(self, checksum=False, order=True, byteorder=True):
"""
Returns a fully valid CubeML string representation of the Cube.
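        For example (illustrative)::

            print(cube.xml(checksum=True))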
"""
doc = Document()
cube_xml_element = self._xml_element(doc, checksum=checksum,
order=order,
byteorder=byteorder)
cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
doc.appendChild(cube_xml_element)
# Print our newly created XML
return doc.toprettyxml(indent=" ")
def _xml_element(self, doc, checksum=False, order=True, byteorder=True):
cube_xml_element = doc.createElement("cube")
if self.standard_name:
cube_xml_element.setAttribute('standard_name', self.standard_name)
if self.long_name:
cube_xml_element.setAttribute('long_name', self.long_name)
if self.var_name:
cube_xml_element.setAttribute('var_name', self.var_name)
cube_xml_element.setAttribute('units', str(self.units))
if self.attributes:
attributes_element = doc.createElement('attributes')
for name in sorted(six.iterkeys(self.attributes)):
attribute_element = doc.createElement('attribute')
attribute_element.setAttribute('name', name)
value = self.attributes[name]
# Strict check because we don't want namedtuples.
if type(value) in (list, tuple):
delimiter = '[]' if isinstance(value, list) else '()'
value = ', '.join(("'%s'"
if isinstance(item, six.string_types)
else '%s') % (item, ) for item in value)
value = delimiter[0] + value + delimiter[1]
else:
value = str(value)
attribute_element.setAttribute('value', value)
attributes_element.appendChild(attribute_element)
cube_xml_element.appendChild(attributes_element)
coords_xml_element = doc.createElement("coords")
for coord in sorted(self.coords(), key=lambda coord: coord.name()):
# make a "cube coordinate" element which holds the dimensions (if
# appropriate) which itself will have a sub-element of the
# coordinate instance itself.
cube_coord_xml_element = doc.createElement("coord")
coords_xml_element.appendChild(cube_coord_xml_element)
dims = list(self.coord_dims(coord))
if dims:
cube_coord_xml_element.setAttribute("datadims", repr(dims))
coord_xml_element = coord.xml_element(doc)
cube_coord_xml_element.appendChild(coord_xml_element)
cube_xml_element.appendChild(coords_xml_element)
# cell methods (no sorting!)
cell_methods_xml_element = doc.createElement("cellMethods")
for cm in self.cell_methods:
cell_method_xml_element = cm.xml_element(doc)
cell_methods_xml_element.appendChild(cell_method_xml_element)
cube_xml_element.appendChild(cell_methods_xml_element)
data_xml_element = doc.createElement("data")
data_xml_element.setAttribute("shape", str(self.shape))
# NB. Getting a checksum triggers any deferred loading,
# in which case it also has the side-effect of forcing the
# byte order to be native.
if checksum:
data = self.data
# Ensure consistent memory layout for checksums.
def normalise(data):
data = np.ascontiguousarray(data)
if data.dtype.newbyteorder('<') != data.dtype:
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('<')
return data
if isinstance(data, ma.MaskedArray):
# Fill in masked values to avoid the checksum being
# sensitive to unused numbers. Use a fixed value so
# a change in fill_value doesn't affect the
# checksum.
crc = '0x%08x' % (
zlib.crc32(normalise(data.filled(0))) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
if ma.is_masked(data):
crc = '0x%08x' % (
zlib.crc32(normalise(data.mask)) & 0xffffffff, )
else:
crc = 'no-masked-elements'
data_xml_element.setAttribute("mask_checksum", crc)
data_xml_element.setAttribute('fill_value',
str(data.fill_value))
else:
crc = '0x%08x' % (zlib.crc32(normalise(data)) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
elif self.has_lazy_data():
data_xml_element.setAttribute("state", "deferred")
else:
data_xml_element.setAttribute("state", "loaded")
# Add the dtype, and also the array and mask orders if the
# data is loaded.
if not self.has_lazy_data():
data = self.data
dtype = data.dtype
def _order(array):
order = ''
if array.flags['C_CONTIGUOUS']:
order = 'C'
elif array.flags['F_CONTIGUOUS']:
order = 'F'
return order
if order:
data_xml_element.setAttribute('order', _order(data))
# NB. dtype.byteorder can return '=', which is bad for
# cross-platform consistency - so we use dtype.str
# instead.
if byteorder:
array_byteorder = {'>': 'big', '<': 'little'}.get(dtype.str[0])
if array_byteorder is not None:
data_xml_element.setAttribute('byteorder', array_byteorder)
if order and isinstance(data, ma.core.MaskedArray):
data_xml_element.setAttribute('mask_order',
_order(data.mask))
else:
dtype = self.lazy_data().dtype
data_xml_element.setAttribute('dtype', dtype.name)
cube_xml_element.appendChild(data_xml_element)
return cube_xml_element
def copy(self, data=None):
"""
Returns a deep copy of this cube.
Kwargs:
* data:
Replace the data of the cube copy with provided data payload.
Returns:
A copy instance of the :class:`Cube`.
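        For example (a sketch; assumes ``np`` is the usual numpy alias)::

            duplicate = cube.copy()
            zeroed = cube.copy(data=np.zeros(cube.shape))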
"""
return self._deepcopy({}, data)
def __copy__(self):
"""Shallow copying is disallowed for Cubes."""
raise copy.Error("Cube shallow-copy not allowed. Use deepcopy() or "
"Cube.copy()")
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo, data=None):
if data is None:
# Use a copy of the source cube data.
if self.has_lazy_data():
# Use copy.copy, as lazy arrays don't have a copy method.
new_cube_data = copy.copy(self.lazy_data())
else:
# Do *not* use copy.copy, as NumPy 0-d arrays do that wrong.
new_cube_data = self.data.copy()
else:
# Use the provided data (without copying it).
if not isinstance(data, biggus.Array):
data = np.asanyarray(data)
if data.shape != self.shape:
msg = 'Cannot copy cube with new data of a different shape ' \
'(slice or subset the cube first).'
raise ValueError(msg)
new_cube_data = data
new_dim_coords_and_dims = copy.deepcopy(self._dim_coords_and_dims,
memo)
new_aux_coords_and_dims = copy.deepcopy(self._aux_coords_and_dims,
memo)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
for old_pair, new_pair in zip(self._dim_coords_and_dims,
new_dim_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
for old_pair, new_pair in zip(self._aux_coords_and_dims,
new_aux_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
new_cube = Cube(new_cube_data,
dim_coords_and_dims=new_dim_coords_and_dims,
aux_coords_and_dims=new_aux_coords_and_dims)
new_cube.metadata = copy.deepcopy(self.metadata, memo)
for factory in self.aux_factories:
new_cube.add_aux_factory(factory.updated(coord_mapping))
return new_cube
# START OPERATOR OVERLOADS
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Cube):
result = self.metadata == other.metadata
# having checked the metadata, now check the coordinates
if result:
coord_comparison = iris.analysis.coord_comparison(self, other)
# if there are any coordinates which are not equal
result = not (coord_comparison['not_equal'] or
coord_comparison['non_equal_data_dimension'])
# having checked everything else, check approximate data
            # equality - loading the data if it has not already been loaded.
if result:
result = np.all(np.abs(self.data - other.data) < 1e-8)
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
    # Must supply __hash__, as Python 3 sets it to None when __eq__ is defined
# This is necessary for merging, but probably shouldn't be used otherwise.
# See #962 and #1772.
def __hash__(self):
return hash(id(self))
def __add__(self, other):
return iris.analysis.maths.add(self, other, ignore=True)
__radd__ = __add__
def __sub__(self, other):
return iris.analysis.maths.subtract(self, other, ignore=True)
__mul__ = iris.analysis.maths.multiply
__rmul__ = iris.analysis.maths.multiply
__div__ = iris.analysis.maths.divide
__truediv__ = iris.analysis.maths.divide
__pow__ = iris.analysis.maths.exponentiate
# END OPERATOR OVERLOADS
def add_history(self, string):
"""
Add the given string to the cube's history.
If the history coordinate does not exist, then one will be created.
.. deprecated:: 1.6
Add/modify history metadata within
attr:`~iris.cube.Cube.attributes` as needed.
"""
warn_deprecated("Cube.add_history() has been deprecated - "
"please modify/create cube.attributes['history'] "
"as needed.")
timestamp = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")
string = '%s Iris: %s' % (timestamp, string)
try:
history = self.attributes['history']
self.attributes['history'] = '%s\n%s' % (history, string)
except KeyError:
self.attributes['history'] = string
# START ANALYSIS ROUTINES
regridded = iris.util._wrap_function_for_method(
iris.analysis._interpolate_private.regrid,
"""
Returns a new cube with values derived from this cube on the
horizontal grid specified by the grid_cube.
.. deprecated:: 1.10
Please replace usage of :meth:`~Cube.regridded` with
:meth:`~Cube.regrid`. See :meth:`iris.analysis.interpolate.regrid`
for details of exact usage equivalents.
""")
# END ANALYSIS ROUTINES
def collapsed(self, coords, aggregator, **kwargs):
"""
Collapse one or more dimensions over the cube given the coordinate/s
and an aggregation.
Examples of aggregations that may be used include
:data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`.
Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may
also be supplied. These include :data:`~iris.analysis.MEAN` and
        :data:`~iris.analysis.SUM`.
Weighted aggregations support an optional *weights* keyword argument.
If set, this should be supplied as an array of weights whose shape
matches the cube. Values for latitude-longitude area weights may be
calculated using :func:`iris.analysis.cartography.area_weights`.
Some Iris aggregators support "lazy" evaluation, meaning that
cubes resulting from this method may represent data arrays which are
not computed until the data is requested (e.g. via ``cube.data`` or
``iris.save``). If lazy evaluation exists for the given aggregator
it will be used wherever possible when this cube's data is itself
a deferred array.
Args:
* coords (string, coord or a list of strings/coords):
Coordinate names/coordinates over which the cube should be
collapsed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied for collapse operation.
Kwargs:
* kwargs:
Aggregation function keyword arguments.
Returns:
Collapsed cube.
For example:
>>> import iris
>>> import iris.analysis
>>> path = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(path)
>>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) (time: 54; latitude: 18)
Dimension coordinates:
time x -
latitude - x
Auxiliary coordinates:
forecast_reference_time x -
Scalar coordinates:
forecast_period: 0 hours
longitude: 180.0 degrees, bound=(0.0, 360.0) degrees
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: longitude
.. note::
Some aggregations are not commutative and hence the order of
processing is important i.e.::
tmp = cube.collapsed('realization', iris.analysis.VARIANCE)
result = tmp.collapsed('height', iris.analysis.VARIANCE)
is not necessarily the same result as::
tmp = cube.collapsed('height', iris.analysis.VARIANCE)
result2 = tmp.collapsed('realization', iris.analysis.VARIANCE)
Conversely operations which operate on more than one coordinate
at the same time are commutative as they are combined internally
into a single operation. Hence the order of the coordinates
supplied in the list does not matter::
cube.collapsed(['longitude', 'latitude'],
iris.analysis.VARIANCE)
is the same (apart from the logically equivalent cell methods that
may be created etc.) as::
cube.collapsed(['latitude', 'longitude'],
iris.analysis.VARIANCE)
.. _partially_collapse_multi-dim_coord:
.. note::
            You cannot partially collapse a multi-dimensional coordinate:
            collapsing only some of the dimensions it spans would leave the
            coordinate inconsistent with the data. Instead you must either:
* collapse in a single operation all cube axes that the
multi-dimensional coordinate spans,
* remove the multi-dimensional coordinate from the cube before
performing the collapse operation, or
* not collapse the coordinate at all.
Multi-dimensional derived coordinates will not prevent a successful
collapse operation.
"""
# Convert any coordinate names to coordinates
coords = self._as_list_of_coords(coords)
if (isinstance(aggregator, iris.analysis.WeightedAggregator) and
not aggregator.uses_weighting(**kwargs)):
msg = "Collapsing spatial coordinate {!r} without weighting"
lat_match = [coord for coord in coords
if 'latitude' in coord.name()]
if lat_match:
for coord in lat_match:
warnings.warn(msg.format(coord.name()))
# Determine the dimensions we need to collapse (and those we don't)
if aggregator.cell_method == 'peak':
dims_to_collapse = [list(self.coord_dims(coord))
for coord in coords]
# Remove duplicate dimensions.
new_dims = collections.OrderedDict.fromkeys(
d for dim in dims_to_collapse for d in dim)
# Reverse the dimensions so the order can be maintained when
# reshaping the data.
dims_to_collapse = list(new_dims)[::-1]
else:
dims_to_collapse = set()
for coord in coords:
dims_to_collapse.update(self.coord_dims(coord))
if not dims_to_collapse:
msg = 'Cannot collapse a dimension which does not describe any ' \
'data.'
raise iris.exceptions.CoordinateCollapseError(msg)
untouched_dims = set(range(self.ndim)) - set(dims_to_collapse)
# Remove the collapsed dimension(s) from the metadata
indices = [slice(None, None)] * self.ndim
for dim in dims_to_collapse:
indices[dim] = 0
collapsed_cube = self[tuple(indices)]
# Collapse any coords that span the dimension(s) being collapsed
for coord in self.dim_coords + self.aux_coords:
coord_dims = self.coord_dims(coord)
if set(dims_to_collapse).intersection(coord_dims):
local_dims = [coord_dims.index(dim) for dim in
dims_to_collapse if dim in coord_dims]
collapsed_cube.replace_coord(coord.collapsed(local_dims))
untouched_dims = sorted(untouched_dims)
        # Record the axis (or axes) argument passed to 'aggregate', so the
        # same value is passed to the 'update_metadata' function.
collapse_axis = -1
data_result = None
# Perform the actual aggregation.
if aggregator.cell_method == 'peak':
# The PEAK aggregator must collapse each coordinate separately.
untouched_shape = [self.shape[d] for d in untouched_dims]
collapsed_shape = [self.shape[d] for d in dims_to_collapse]
new_shape = untouched_shape + collapsed_shape
array_dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(
self.data, array_dims).reshape(new_shape)
for dim in dims_to_collapse:
unrolled_data = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
data_result = unrolled_data
# Perform the aggregation in lazy form if possible.
elif (aggregator.lazy_func is not None
and len(dims_to_collapse) == 1 and self.has_lazy_data()):
# Use a lazy operation separately defined by the aggregator, based
# on the cube lazy array.
# NOTE: do not reform the data in this case, as 'lazy_aggregate'
# accepts multiple axes (unlike 'aggregate').
collapse_axis = dims_to_collapse
try:
data_result = aggregator.lazy_aggregate(self.lazy_data(),
collapse_axis,
**kwargs)
except TypeError:
# TypeError - when unexpected keywords passed through (such as
# weights to mean)
pass
# If we weren't able to complete a lazy aggregation, compute it
# directly now.
if data_result is None:
# Perform the (non-lazy) aggregation over the cube data
# First reshape the data so that the dimensions being aggregated
# over are grouped 'at the end' (i.e. axis=-1).
dims_to_collapse = sorted(dims_to_collapse)
end_size = reduce(operator.mul, (self.shape[dim] for dim in
dims_to_collapse))
untouched_shape = [self.shape[dim] for dim in untouched_dims]
new_shape = untouched_shape + [end_size]
dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(self.data, dims).reshape(new_shape)
# Perform the same operation on the weights if applicable
if kwargs.get("weights") is not None:
weights = kwargs["weights"].view()
kwargs["weights"] = np.transpose(weights,
dims).reshape(new_shape)
data_result = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
aggregator.update_metadata(collapsed_cube, coords, axis=collapse_axis,
**kwargs)
result = aggregator.post_process(collapsed_cube, data_result, coords,
**kwargs)
return result
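    # A hedged sketch of a weighted collapse as described above; 'cube' is
    # any cube whose latitude/longitude coordinates carry bounds, which
    # area_weights() requires:
    #
    #   >>> import iris.analysis.cartography
    #   >>> weights = iris.analysis.cartography.area_weights(cube)
    #   >>> mean = cube.collapsed(['latitude', 'longitude'],
    #   ...                       iris.analysis.MEAN, weights=weights)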
def aggregated_by(self, coords, aggregator, **kwargs):
"""
Perform aggregation over the cube given one or more "group
coordinates".
A "group coordinate" is a coordinate where repeating values represent a
single group, such as a month coordinate on a daily time slice.
Repeated values will form a group even if they are not consecutive.
The group coordinates must all be over the same cube dimension. Each
common value group identified over all the group-by coordinates is
collapsed using the provided aggregator.
Args:
* coords (list of coord names or :class:`iris.coords.Coord` instances):
One or more coordinates over which group aggregation is to be
performed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to each group.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris
>>> import iris.analysis
>>> import iris.coord_categorisation as cat
>>> fname = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(fname, 'surface_temperature')
>>> cat.add_year(cube, 'time', name='year')
>>> new_cube = cube.aggregated_by('year', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) \
(time: 5; latitude: 18; longitude: 432)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_reference_time \
x - -
year \
x - -
Scalar coordinates:
forecast_period: 0 hours
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: year
"""
groupby_coords = []
dimension_to_groupby = None
# We can't handle weights
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
raise ValueError('Invalid Aggregation, aggregated_by() cannot use'
' weights.')
coords = self._as_list_of_coords(coords)
for coord in sorted(coords, key=lambda coord: coord._as_defn()):
if coord.ndim > 1:
msg = 'Cannot aggregate_by coord %s as it is ' \
'multidimensional.' % coord.name()
raise iris.exceptions.CoordinateMultiDimError(msg)
dimension = self.coord_dims(coord)
if not dimension:
msg = 'Cannot group-by the coordinate "%s", as its ' \
'dimension does not describe any data.' % coord.name()
raise iris.exceptions.CoordinateCollapseError(msg)
if dimension_to_groupby is None:
dimension_to_groupby = dimension[0]
if dimension_to_groupby != dimension[0]:
msg = 'Cannot group-by coordinates over different dimensions.'
raise iris.exceptions.CoordinateCollapseError(msg)
groupby_coords.append(coord)
# Determine the other coordinates that share the same group-by
# coordinate dimension.
shared_coords = list(filter(
lambda coord_: coord_ not in groupby_coords,
self.coords(dimensions=dimension_to_groupby)))
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby(groupby_coords, shared_coords)
# Create the resulting aggregate-by cube and remove the original
        # coordinates that are going to be grouped by.
key = [slice(None, None)] * self.ndim
# Generate unique index tuple key to maintain monotonicity.
key[dimension_to_groupby] = tuple(range(len(groupby)))
key = tuple(key)
aggregateby_cube = self[key]
for coord in groupby_coords + shared_coords:
aggregateby_cube.remove_coord(coord)
# Determine the group-by cube data shape.
data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs))
data_shape[dimension_to_groupby] = len(groupby)
# Aggregate the group-by data.
cube_slice = [slice(None, None)] * len(data_shape)
for i, groupby_slice in enumerate(groupby.group()):
# Slice the cube with the group-by slice to create a group-by
# sub-cube.
cube_slice[dimension_to_groupby] = groupby_slice
groupby_sub_cube = self[tuple(cube_slice)]
# Perform the aggregation over the group-by sub-cube and
# repatriate the aggregated data into the aggregate-by cube data.
cube_slice[dimension_to_groupby] = i
result = aggregator.aggregate(groupby_sub_cube.data,
axis=dimension_to_groupby,
**kwargs)
# Determine aggregation result data type for the aggregate-by cube
# data on first pass.
if i == 0:
if isinstance(self.data, ma.MaskedArray):
aggregateby_data = ma.zeros(data_shape, dtype=result.dtype)
else:
aggregateby_data = np.zeros(data_shape, dtype=result.dtype)
aggregateby_data[tuple(cube_slice)] = result
# Add the aggregation meta data to the aggregate-by cube.
aggregator.update_metadata(aggregateby_cube,
groupby_coords,
aggregate=True, **kwargs)
# Replace the appropriate coordinates within the aggregate-by cube.
dim_coord, = self.coords(dimensions=dimension_to_groupby,
dim_coords=True) or [None]
for coord in groupby.coords:
if dim_coord is not None and \
dim_coord._as_defn() == coord._as_defn() and \
isinstance(coord, iris.coords.DimCoord):
aggregateby_cube.add_dim_coord(coord.copy(),
dimension_to_groupby)
else:
aggregateby_cube.add_aux_coord(coord.copy(),
dimension_to_groupby)
# Attach the aggregate-by data into the aggregate-by cube.
aggregateby_cube = aggregator.post_process(aggregateby_cube,
aggregateby_data,
coords, **kwargs)
return aggregateby_cube
def rolling_window(self, coord, aggregator, window, **kwargs):
"""
Perform rolling window aggregation on a cube given a coordinate, an
aggregation method and a window size.
Args:
* coord (string/:class:`iris.coords.Coord`):
The coordinate over which to perform the rolling window
aggregation.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to the data.
* window (int):
Size of window to use.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments. The weights
argument to the aggregator, if any, should be a 1d array with the
same length as the chosen window.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris, iris.analysis
>>> fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
>>> air_press = iris.load_cube(fname, 'surface_temperature')
>>> print(air_press)
surface_temperature / (K) \
(time: 6; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
>>> print(air_press.rolling_window('time', iris.analysis.MEAN, 3))
surface_temperature / (K) \
(time: 4; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
mean: time
        Notice that the time dimension now represents the 4 possible
        windows of size 3 from the original cube.
"""
coord = self._as_list_of_coords(coord)[0]
if getattr(coord, 'circular', False):
raise iris.exceptions.NotYetImplementedError(
'Rolling window over a circular coordinate.')
if window < 2:
raise ValueError('Cannot perform rolling window '
'with a window size less than 2.')
if coord.ndim > 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
dimension = self.coord_dims(coord)
if len(dimension) != 1:
raise iris.exceptions.CoordinateCollapseError(
'Cannot perform rolling window with coordinate "%s", '
'must map to one data dimension.' % coord.name())
dimension = dimension[0]
# Use indexing to get a result-cube of the correct shape.
# NB. This indexes the data array which is wasted work.
# As index-to-get-shape-then-fiddle is a common pattern, perhaps
# some sort of `cube.prepare()` method would be handy to allow
# re-shaping with given data, and returning a mapping of
# old-to-new-coords (to avoid having to use metadata identity)?
key = [slice(None, None)] * self.ndim
key[dimension] = slice(None, self.shape[dimension] - window + 1)
new_cube = self[tuple(key)]
# take a view of the original data using the rolling_window function
# this will add an extra dimension to the data at dimension + 1 which
# represents the rolled window (i.e. will have a length of window)
rolling_window_data = iris.util.rolling_window(self.data,
window=window,
axis=dimension)
# now update all of the coordinates to reflect the aggregation
for coord_ in self.coords(dimensions=dimension):
if coord_.has_bounds():
warnings.warn('The bounds of coordinate %r were ignored in '
'the rolling window operation.' % coord_.name())
if coord_.ndim != 1:
raise ValueError('Cannot calculate the rolling '
'window of %s as it is a multidimensional '
'coordinate.' % coord_.name())
new_bounds = iris.util.rolling_window(coord_.points, window)
if np.issubdtype(new_bounds.dtype, np.str):
# Handle case where the AuxCoord contains string. The points
# are the serialized form of the points contributing to each
# window and the bounds are the first and last points in the
# window as with numeric coordinates.
new_points = np.apply_along_axis(lambda x: '|'.join(x), -1,
new_bounds)
new_bounds = new_bounds[:, (0, -1)]
else:
# Take the first and last element of the rolled window (i.e.
# the bounds) and the new points are the midpoints of these
# bounds.
new_bounds = new_bounds[:, (0, -1)]
new_points = np.mean(new_bounds, axis=-1)
# wipe the coords points and set the bounds
new_coord = new_cube.coord(coord_)
new_coord.points = new_points
new_coord.bounds = new_bounds
# update the metadata of the cube itself
aggregator.update_metadata(
new_cube, [coord],
action='with a rolling window of length %s over' % window,
**kwargs)
# and perform the data transformation, generating weights first if
# needed
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
if 'weights' in kwargs:
weights = kwargs['weights']
if weights.ndim > 1 or weights.shape[0] != window:
raise ValueError('Weights for rolling window aggregation '
'must be a 1d array with the same length '
'as the window.')
kwargs = dict(kwargs)
kwargs['weights'] = iris.util.broadcast_to_shape(
weights, rolling_window_data.shape, (dimension + 1,))
data_result = aggregator.aggregate(rolling_window_data,
axis=dimension + 1,
**kwargs)
result = aggregator.post_process(new_cube, data_result, [coord],
**kwargs)
return result
def interpolate(self, sample_points, scheme, collapse_scalar=True):
"""
Interpolate from this :class:`~iris.cube.Cube` to the given
sample points using the given interpolation scheme.
Args:
* sample_points:
A sequence of (coordinate, points) pairs over which to
interpolate. The values for coordinates that correspond to
dates or times may optionally be supplied as datetime.datetime or
netcdftime.datetime instances.
* scheme:
The type of interpolation to use to interpolate from this
:class:`~iris.cube.Cube` to the given sample points. The
interpolation schemes currently available in Iris are:
* :class:`iris.analysis.Linear`, and
* :class:`iris.analysis.Nearest`.
Kwargs:
* collapse_scalar:
Whether to collapse the dimension of scalar sample points
in the resulting cube. Default is True.
Returns:
A cube interpolated at the given sample points.
If `collapse_scalar` is True then the dimensionality of the cube
will be the number of original cube dimensions minus
the number of scalar coordinates.
For example:
>>> import datetime
>>> import iris
>>> path = iris.sample_data_path('uk_hires.pp')
>>> cube = iris.load_cube(path, 'air_potential_temperature')
>>> print(cube.summary(shorten=True))
air_potential_temperature / (K) \
(time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(cube.coord('time'))
DimCoord([2009-11-19 10:00:00, 2009-11-19 11:00:00, \
2009-11-19 12:00:00], standard_name='time', calendar='gregorian')
>>> print(cube.coord('time').points)
[ 349618. 349619. 349620.]
>>> samples = [('time', 349618.5)]
>>> result = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result.coord('time'))
DimCoord([2009-11-19 10:30:00], standard_name='time', \
calendar='gregorian')
>>> print(result.coord('time').points)
[ 349618.5]
>>> # For datetime-like coordinates, we can also use
>>> # datetime-like objects.
>>> samples = [('time', datetime.datetime(2009, 11, 19, 10, 30))]
>>> result2 = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result2.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result2.coord('time'))
DimCoord([2009-11-19 10:30:00], standard_name='time', \
calendar='gregorian')
>>> print(result2.coord('time').points)
[ 349618.5]
>>> print(result == result2)
True
"""
coords, points = zip(*sample_points)
interp = scheme.interpolator(self, coords)
return interp(points, collapse_scalar=collapse_scalar)
def regrid(self, grid, scheme):
"""
Regrid this :class:`~iris.cube.Cube` on to the given target `grid`
using the given regridding `scheme`.
Args:
* grid:
A :class:`~iris.cube.Cube` that defines the target grid.
* scheme:
The type of regridding to use to regrid this cube onto the
target grid. The regridding schemes currently available
in Iris are:
* :class:`iris.analysis.Linear`,
* :class:`iris.analysis.Nearest`, and
* :class:`iris.analysis.AreaWeighted`.
Returns:
A cube defined with the horizontal dimensions of the target grid
and the other dimensions from this cube. The data values of
this cube will be converted to values on the new grid
according to the given regridding scheme.
"""
regridder = scheme.regridder(self, grid)
return regridder(self)
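    # A hedged usage sketch: regrid this cube onto another cube's horizontal
    # grid using the Linear scheme listed above ('other_cube' is illustrative):
    #
    #   >>> result = cube.regrid(other_cube, iris.analysis.Linear())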
class ClassDict(collections.MutableMapping, object):
"""
A mapping that stores objects keyed on their superclasses and their names.
The mapping has a root class, all stored objects must be a subclass of the
root class. The superclasses used for an object include the class of the
object, but do not include the root class. Only one object is allowed for
any key.
"""
def __init__(self, superclass):
if not isinstance(superclass, type):
raise TypeError("The superclass must be a Python type or new "
"style class.")
self._superclass = superclass
self._basic_map = {}
self._retrieval_map = {}
def add(self, object_, replace=False):
'''Add an object to the dictionary.'''
if not isinstance(object_, self._superclass):
msg = "Only subclasses of {!r} are allowed as values.".format(
self._superclass.__name__)
raise TypeError(msg)
# Find all the superclasses of the given object, starting with the
# object's class.
superclasses = type.mro(type(object_))
if not replace:
# Ensure nothing else is already registered against those
# superclasses.
# NB. This implies the _basic_map will also be empty for this
# object.
for key_class in superclasses:
if key_class in self._retrieval_map:
msg = "Cannot add instance of '%s' because instance of " \
"'%s' already added." % (type(object_).__name__,
key_class.__name__)
raise ValueError(msg)
# Register the given object against those superclasses.
for key_class in superclasses:
self._retrieval_map[key_class] = object_
self._retrieval_map[key_class.__name__] = object_
self._basic_map[type(object_)] = object_
def __getitem__(self, class_):
try:
return self._retrieval_map[class_]
except KeyError:
raise KeyError('Coordinate system %r does not exist.' % class_)
def __setitem__(self, key, value):
raise NotImplementedError('You must call the add method instead.')
def __delitem__(self, class_):
cs = self[class_]
keys = [k for k, v in six.iteritems(self._retrieval_map) if v == cs]
for key in keys:
del self._retrieval_map[key]
del self._basic_map[type(cs)]
return cs
def __len__(self):
return len(self._basic_map)
def __iter__(self):
for item in self._basic_map:
yield item
def keys(self):
'''Return the keys of the dictionary mapping.'''
return self._basic_map.keys()
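# A hedged sketch of ClassDict in use: GeogCS is a subclass of the
# iris.coord_systems.CoordSystem root class, and add() registers the object
# under both its classes and their names:
#
#   >>> import iris.coord_systems
#   >>> cs_map = ClassDict(iris.coord_systems.CoordSystem)
#   >>> cs_map.add(iris.coord_systems.GeogCS(6371229.0))
#   >>> cs_map['GeogCS'] is cs_map[iris.coord_systems.GeogCS]
#   True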
def sorted_axes(axes):
"""
Returns the axis names sorted alphabetically, with the exception that
    't', 'z', 'y', and 'x' are sorted to the end.
"""
return sorted(axes, key=lambda name: ({'x': 4,
'y': 3,
'z': 2,
't': 1}.get(name, 0), name))
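# For example, names without a reserved meaning sort alphabetically first,
# followed by the axis names in t, z, y, x order:
#
#   >>> sorted_axes(['x', 'y', 't', 'z', 'forecast_period'])
#   ['forecast_period', 't', 'z', 'y', 'x']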
# See Cube.slice() for the definition/context.
class _SliceIterator(collections.Iterator):
def __init__(self, cube, dims_index, requested_dims, ordered):
self._cube = cube
# Let Numpy do some work in providing all of the permutations of our
# data shape. This functionality is something like:
# ndindex(2, 1, 3) -> [(0, 0, 0), (0, 0, 1), (0, 0, 2),
# (1, 0, 0), (1, 0, 1), (1, 0, 2)]
self._ndindex = np.ndindex(*dims_index)
self._requested_dims = requested_dims
# indexing relating to sliced cube
self._mod_requested_dims = np.argsort(requested_dims)
self._ordered = ordered
def __next__(self):
# NB. When self._ndindex runs out it will raise StopIteration for us.
index_tuple = next(self._ndindex)
        # Turn the given tuple into a list so that we can replace its entries
index_list = list(index_tuple)
# For each of the spanning dimensions requested, replace the 0 with a
# spanning slice
for d in self._requested_dims:
index_list[d] = slice(None, None)
# Request the slice
cube = self._cube[tuple(index_list)]
if self._ordered:
if any(self._mod_requested_dims != list(range(len(cube.shape)))):
cube.transpose(self._mod_requested_dims)
return cube
next = __next__
|
SusanJL/iris
|
lib/iris/cube.py
|
Python
|
gpl-3.0
| 158,463
|
[
"NetCDF"
] |
3f86ff19647816f6a5d176a99bb928a6931d8edc1afa9784ee93aec3f0d80926
|
"""
Tests on the repository activation/deactivation functions
"""
from octopus.modules.es.testindex import ESTestCase
from service import control, models
import time
class TestModels(ESTestCase):
def setUp(self):
super(TestModels, self).setUp()
def tearDown(self):
super(TestModels, self).tearDown()
def test_01_activate_deactivate(self):
# first, activation should create a status if none exists
control.activate_deposit("123456789")
time.sleep(2)
rs = models.RepositoryStatus.pull("123456789")
assert rs is not None
assert rs.status == "succeeding"
# now deactivate that account
control.deactivate_deposit("123456789")
time.sleep(2)
rs = models.RepositoryStatus.pull("123456789")
assert rs is not None
assert rs.status == "failing"
# now re-activate that account
control.activate_deposit("123456789")
time.sleep(2)
rs = models.RepositoryStatus.pull("123456789")
assert rs is not None
assert rs.status == "succeeding"
|
JiscPER/jper-sword-out
|
service/tests/unit/test_control.py
|
Python
|
apache-2.0
| 1,105
|
[
"Octopus"
] |
bf9623dcdf8290c9fd13c1ba67411d4b305fa3ff7ada52aec608c6e9ef6d61a7
|
"""
Unit tests for calculation of lattice Green function for diffusion
"""
__author__ = 'Dallas R. Trinkle'
import unittest
import numpy as np
from scipy import special
import onsager.GFcalc as GFcalc
import onsager.crystal as crystal
def poleFT(di, u, pm, erfupm=-1):
"""
Calculates the pole FT (excluding the volume prefactor) given the `di` eigenvalues,
    the magnitude of u (available from unorm()), and the pmax scaling factor.
:param di: array [:] eigenvalues of `D2`
:param u: double magnitude of u, from unorm() = x.D^-1.x
:param pm: double scaling factor pmax for exponential cutoff function
:param erfupm: double, optional value of erf(0.5*u*pm) (negative = not set, then its calculated)
:return poleFT: double
        integral of Gaussian cutoff function corresponding to an l=0 pole;
        :math:`\\operatorname{erf}(u p_m/2)/(4\\pi u \\sqrt{d_1 d_2 d_3})` if u > 0,
        :math:`p_m/(4\\pi^{3/2} \\sqrt{d_1 d_2 d_3})` if u == 0
"""
if (u == 0):
return 0.25 * pm / np.sqrt(np.product(di * np.pi))
if (erfupm < 0):
erfupm = special.erf(0.5 * u * pm)
return erfupm * 0.25 / (np.pi * u * np.sqrt(np.product(di)))
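# Quick numerical check (hedged): for small u, erf(0.5*u*pm) ~ u*pm/sqrt(pi),
# so the u > 0 branch tends continuously to the u == 0 branch:
#
#   >>> di, pm = np.ones(3), 2.0
#   >>> abs(poleFT(di, 1e-8, pm) - poleFT(di, 0., pm)) < 1e-12
#   True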
class GreenFuncCrystalTests(unittest.TestCase):
"""Test new implementation of GF calculator, based on Crystal class"""
longMessage = False
def setUp(self):
pass
def testFCC(self):
"""Test on FCC"""
FCC = crystal.Crystal.FCC(1.)
FCC_sitelist = FCC.sitelist(0)
FCC_jumpnetwork = FCC.jumpnetwork(0, 0.75)
FCC_GF = GFcalc.GFCrystalcalc(FCC, 0, FCC_sitelist, FCC_jumpnetwork, Nmax=4)
FCC_GF.SetRates([1], [0], [1], [0])
# test the pole function:
for u in np.linspace(0, 5, 21):
pole_orig = FCC_GF.crys.volume * poleFT(FCC_GF.d, u, FCC_GF.pmax)
pole_new = FCC_GF.g_Taylor_fnlu[(-2, 0)](u).real
self.assertAlmostEqual(pole_orig, pole_new, places=15, msg="Pole (-2,0) failed for u={}".format(u))
# test the discontinuity function:
for u in np.linspace(0, 5, 21):
disc_orig = FCC_GF.crys.volume * (FCC_GF.pmax / (2 * np.sqrt(np.pi))) ** 3 * \
np.exp(-(0.5 * u * FCC_GF.pmax) ** 2) / np.sqrt(np.product(FCC_GF.d))
disc_new = FCC_GF.g_Taylor_fnlu[(0, 0)](u).real
self.assertAlmostEqual(disc_orig, disc_new, places=15, msg="Disc (0,0) failed for u={}".format(u))
# test the GF evaluation against the original
# NNvect = np.array([dx for (i,j), dx in FCC_jumpnetwork[0]])
# rates = np.array([1 for jump in NNvect])
# old_FCC_GF = GFcalc.GFcalc(self.FCC.lattice, NNvect, rates)
# for R in [np.array([0.,0.,0.]), np.array([0.5, 0.5, 0.]), np.array([0.5, 0., 0.5]), \
# np.array([1.,0.,0.]), np.array([1.,0.5,0.5]), np.array([1.,1.,0.])]:
# GF_orig = old_FCC_GF.GF(R)
# GF_new = FCC_GF(0,0,R)
# # print("R={}: dG= {} G_orig= {} G_new= {}".format(R, GF_new-GF_orig, GF_orig, GF_new))
# self.assertAlmostEqual(GF_orig, GF_new, places=5,
# msg="Failed for R={}".format(R))
def testHCP(self):
"""Test on HCP"""
HCP = crystal.Crystal.HCP(1., np.sqrt(8 / 3))
HCP_sitelist = HCP.sitelist(0)
HCP_jumpnetwork = HCP.jumpnetwork(0, 1.01)
HCP_GF = GFcalc.GFCrystalcalc(HCP, 0, HCP_sitelist, HCP_jumpnetwork, Nmax=4)
HCP_GF.SetRates([1], [0], [1, 1], [0, 0]) # one unique site, two types of jumps
# print(HCP_GF.Diffusivity())
# make some basic vectors:
hcp_basal = HCP.pos2cart(np.array([1., 0., 0.]), (0, 0)) - \
HCP.pos2cart(np.array([0., 0., 0.]), (0, 0))
hcp_pyram = HCP.pos2cart(np.array([0., 0., 0.]), (0, 1)) - \
HCP.pos2cart(np.array([0., 0., 0.]), (0, 0))
hcp_zero = np.zeros(3)
for R in [hcp_zero, hcp_basal, hcp_pyram]:
self.assertAlmostEqual(HCP_GF(0, 0, R), HCP_GF(1, 1, R), places=15)
self.assertAlmostEqual(HCP_GF(0, 0, hcp_basal), HCP_GF(0, 0, -hcp_basal), places=15)
self.assertAlmostEqual(HCP_GF(0, 1, hcp_pyram), HCP_GF(1, 0, -hcp_pyram), places=15)
g0 = HCP_GF(0, 0, hcp_zero)
gbasal = HCP_GF(0, 0, hcp_basal)
gpyram = HCP_GF(0, 1, hcp_pyram)
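        # Applying the lattice rate operator to G recovers a delta function
        # at the origin: -12*g0 (the total escape rate) plus the twelve
        # neighbour contributions sums to 1.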
self.assertAlmostEqual(-12 * g0 + 6 * gbasal + 6 * gpyram, 1, places=6)
# Try again, but with different rates:
HCP_GF.SetRates([1], [0], [1, 3], [0, 0]) # one unique site, two types of jumps
g0 = HCP_GF(0, 0, hcp_zero)
gw = 0
for jumplist, omega in zip(HCP_jumpnetwork, HCP_GF.symmrate * HCP_GF.maxrate):
for (i, j), dx in jumplist:
if (i == 0):
gw += omega * (HCP_GF(i, j, dx) - g0)
self.assertAlmostEqual(gw, 1, places=6)
def testsquare(self):
"""Test on square"""
square = crystal.Crystal(np.eye(2), [np.zeros(2)])
square_sitelist = square.sitelist(0)
square_jumpnetwork = square.jumpnetwork(0, 1.01)
square_GF = GFcalc.GFCrystalcalc(square, 0, square_sitelist, square_jumpnetwork, Nmax=4)
square_GF.SetRates([1], [0], [1], [0])
square_zero = np.zeros(2)
square_1nn = np.array([1.,0.])
square_2nn = np.array([1.,1.])
square_3nn = np.array([2.,0.])
g0 = square_GF(0, 0, square_zero)
g1 = square_GF(0, 0, square_1nn)
g2 = square_GF(0, 0, square_2nn)
g3 = square_GF(0, 0, square_3nn)
self.assertAlmostEqual(-4 * g0 + 4 * g1, 1, places=6)
self.assertAlmostEqual(-4 * g1 + g0 + 2*g2 + g3, 0, places=6)
def testtria(self):
"""Test on triagonal"""
tria = crystal.Crystal(np.array([[1/2, 1/2], [-np.sqrt(3/4), np.sqrt(3/4)]]), [np.zeros(2)])
tria_sitelist = tria.sitelist(0)
tria_jumpnetwork = tria.jumpnetwork(0, 1.01)
tria_GF = GFcalc.GFCrystalcalc(tria, 0, tria_sitelist, tria_jumpnetwork, Nmax=4)
tria_GF.SetRates([1], [0], [1], [0])
tria_zero = np.zeros(2)
tria_1nn = np.array([1.,0.])
g0 = tria_GF(0, 0, tria_zero)
g1 = tria_GF(0, 0, tria_1nn)
self.assertAlmostEqual(-6 * g0 + 6 * g1, 1, places=6)
def testhoneycomb(self):
"""Test on honeycomb"""
honey = crystal.Crystal(np.array([[1/2, 1/2], [-np.sqrt(3/4), np.sqrt(3/4)]]),
[np.array([2/3, 1/3]), np.array([1/3, 2/3])])
honey_sitelist = honey.sitelist(0)
honey_jumpnetwork = honey.jumpnetwork(0, 0.6)
honey_GF = GFcalc.GFCrystalcalc(honey, 0, honey_sitelist, honey_jumpnetwork, Nmax=4)
honey_GF.SetRates([1], [0], [1], [0])
honey_zero = np.zeros(2)
honey_1nn = honey.pos2cart(np.zeros(2), (0, 1)) - honey.pos2cart(np.zeros(2), (0, 0))
g0 = honey_GF(0, 0, honey_zero)
g1 = honey_GF(0, 1, honey_1nn)
self.assertAlmostEqual(-3 * g0 + 3 * g1, 1, places=6)
def testBCC_B2(self):
"""Test that BCC and B2 produce the same GF"""
a0 = 1.
chem = 0
BCC = crystal.Crystal.BCC(a0)
BCC_sitelist = BCC.sitelist(chem)
BCC_jumpnetwork = BCC.jumpnetwork(chem, 0.87 * a0)
BCC_GF = GFcalc.GFCrystalcalc(BCC, chem, BCC_sitelist, BCC_jumpnetwork, Nmax=6)
BCC_GF.SetRates(np.ones(len(BCC_sitelist)), np.zeros(len(BCC_sitelist)),
2. * np.ones(len(BCC_jumpnetwork)), np.zeros(len(BCC_jumpnetwork)))
B2 = crystal.Crystal(a0 * np.eye(3), [np.zeros(3), np.array([0.45, 0.45, 0.45])])
B2_sitelist = B2.sitelist(chem)
B2_jumpnetwork = B2.jumpnetwork(chem, 0.99 * a0)
B2_GF = GFcalc.GFCrystalcalc(B2, chem, B2_sitelist, B2_jumpnetwork, Nmax=6)
B2_GF.SetRates(np.ones(len(B2_sitelist)), np.zeros(len(B2_sitelist)),
2. * np.ones(len(B2_jumpnetwork)), np.zeros(len(B2_jumpnetwork)))
veclist = [np.array([a0, 0, 0]), np.array([0, a0, 0]), np.array([0, 0, a0]),
np.array([-a0, 0, 0]), np.array([0, -a0, 0]), np.array([0, 0, -a0])]
for v1 in veclist:
for v2 in veclist:
# print('{}: '.format(v1+v2) + '{} vs {} vs {}'.format(B2_GF(0,0,v1+v2),B2_GF(1,1,v1+v2),BCC_GF(0,0,v1+v2)))
self.assertAlmostEqual(BCC_GF(0, 0, v1 + v2), B2_GF(0, 0, v1 + v2), places=5)
self.assertAlmostEqual(BCC_GF(0, 0, v1 + v2), B2_GF(1, 1, v1 + v2), places=5)
for jlist in B2_jumpnetwork:
for (i, j), dx in jlist:
# convert our B2 dx into a corresponding BCC dx:
BCCdx = (0.5 * a0) * np.round(dx / (0.5 * a0))
# print('({},{}), {} / {}: '.format(i,j,dx,BCCdx) + '{} vs {}'.format(B2_GF(i,j,dx), BCC_GF(0,0,BCCdx)))
self.assertAlmostEqual(BCC_GF(0, 0, BCCdx), B2_GF(i, j, dx), places=5)
def testPyrope(self):
"""Test using the pyrope structure: two disconnected symmetry-related networks"""
a0 = 1.
chem = 0
cutoff = 0.31*a0
alatt = a0 * np.array([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]])
invlatt = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
uMg = ((1 / 8, 0, 1 / 4), (3 / 8, 0, 3 / 4), (1 / 4, 1 / 8, 0), (3 / 4, 3 / 8, 0),
(0, 1 / 4, 1 / 8), (0, 3 / 4, 3 / 8), (7 / 8, 0, 3 / 4), (5 / 8, 0, 1 / 4),
(3 / 4, 7 / 8, 0), (1 / 4, 5 / 8, 0), (0, 3 / 4, 7 / 8), (0, 1 / 4, 5 / 8))
tovec = lambda x: np.dot(invlatt, x)
# this is a reduced version of pyrope: just the Mg (24c sites in 230)
# pyrope2 = half of the sites; makes for a single, connected network
pyropeMg = crystal.Crystal(alatt, [[vec(w) for w in uMg for vec in (tovec,)]], ['Mg'])
pyropeMg2 = crystal.Crystal(alatt, [[vec(w) for w in uMg[:6] for vec in (tovec,)]], ['Mg'])
sitelist = pyropeMg.sitelist(chem)
sitelist2 = pyropeMg2.sitelist(chem)
jumpnetwork = pyropeMg.jumpnetwork(chem, cutoff)
jumpnetwork2 = pyropeMg2.jumpnetwork(chem, cutoff)
self.assertEqual(len(jumpnetwork), 1)
self.assertEqual(len(jumpnetwork2), 1)
GF = GFcalc.GFCrystalcalc(pyropeMg, chem, sitelist, jumpnetwork)
GF2 = GFcalc.GFCrystalcalc(pyropeMg2, chem, sitelist2, jumpnetwork2)
GF.SetRates(np.ones(1), np.zeros(1), 0.25*np.ones(1), np.zeros(1)) # simple tracer
GF2.SetRates(np.ones(1), np.zeros(1), 0.25*np.ones(1), np.zeros(1)) # simple tracer
D0 = np.eye(3)*(1/64)
for D in (GF.D,GF2.D):
self.assertTrue(np.allclose(D0, D),
msg='Diffusivity does not match?\n{}\n!=\n{}'.format(D0,D))
basis = pyropeMg.basis[chem]
# order of testing: 000, 211
ijlist = ((0,0), (0,2))
dxlist = [np.dot(alatt, basis[j]-basis[i]) for (i,j) in ijlist]
glist = np.array([GF(i,j,dx) for (i,j), dx in zip(ijlist, dxlist)])
g2list = np.array([GF2(i,j,dx) for (i,j), dx in zip(ijlist, dxlist)])
Gref = np.array([2.30796022, 1.30807261])
self.assertTrue(np.allclose(glist, -Gref, rtol=1e-4),
msg='Does not match Carlson and Wilson values?\n{} !=\n{}'.format(glist, Gref))
# with the nearly disconnected, the rate anisotropy makes comparison of differences
# much more stable
self.assertTrue(np.allclose(glist, g2list, rtol=1e-12),
msg='Does not match single network GF values?\n{} !=\n{}'.format(glist, g2list))
for i in range(12):
for j in range(12):
dx = np.dot(alatt, basis[j]-basis[i])
if i//6 != j//6:
self.assertAlmostEqual(GF(i,j,dx), 0,
msg='Does not give disconnected networks? {},{}'.format(i,j))
else:
if i>=6: dxmap = -dx # inversion
else: dxmap = dx
self.assertAlmostEqual(GF(i,j,dx), GF2(i%6,j%6,dxmap),
msg='Does not match single network? {},{}'.format(i,j))
|
DallasTrinkle/Onsager
|
test/test_GFcalc.py
|
Python
|
mit
| 12,215
|
[
"CRYSTAL",
"Gaussian"
] |
98cc63dec1f85b89ab86c11bec8679f742231b2ee91ddd93f6de793b65876317
|
import os
import sys
import copy
import cPickle
import numpy as np
seed = np.random.randint(2**16)
# seed = 2958
# seed = 60017
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from optofit.cneuron.compartment import Compartment, SquidCompartment
from optofit.cneuron.channels import LeakChannel, NaChannel, KdrChannel
from optofit.cneuron.simulate import forward_euler
from optofit.cneuron.gpchannel import GPChannel, sigma
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
import kayak
import scipy
plot_progress = True
args = iter(sys.argv)
for line in args:
if line == "--seed":
seed = int(next(args))
elif line == "--no_graph":
plot_progress = False
# Set the random seed for reproducibility
np.random.seed(seed)
print "Seed: ", seed
# Make a simple compartment
hypers = {
'C' : 1.0,
'V0' : -60.0,
'g_leak' : 0.03,
'E_leak' : -65.0}
gp1_hypers = {'D': 2,
'sig' : 1,
'g_gp' : 12.0,
'E_gp' : 50.0,
'alpha_0': 1.0,
'beta_0' : 2.0,
'sigma_kernel': 1.0}
gp2_hypers = {'D' : 1,
'sig' : 1,
'g_gp' : 3.60,
# 'g_gp' : 0,
'E_gp' : -77.0,
'alpha_0': 1.0,
'beta_0' : 2.0,
'sigma_kernel': 1.0}
squid_hypers = {
'C' : 1.0,
'V0' : -60.0,
'g_leak' : 0.03,
'E_leak' : -65.0,
'g_na' : 12.0,
# 'g_na' : 0.0,
'E_na' : 50.0,
'g_kdr' : 3.60,
'E_kdr' : -77.0
}
def create_gp_model():
# Add a few channels
body = Compartment(name='body', hypers=hypers)
leak = LeakChannel(name='leak', hypers=hypers)
gp1 = GPChannel(name='gpna', hypers=gp1_hypers)
gp2 = GPChannel(name='gpk', hypers=gp2_hypers)
body.add_child(leak)
body.add_child(gp1)
body.add_child(gp2)
# Initialize the model
D, I = body.initialize_offsets()
return body, gp1, gp2, D, I
def sample_squid_model(start = 20, stop = 80, intensity = 7.):
squid_body = SquidCompartment(name='body', hypers=squid_hypers)
# squid_body = Compartment(name='body', hypers=squid_hypers)
# leak = LeakChannel(name='leak', hypers=squid_hypers)
# na = NaChannel(name='na', hypers=squid_hypers)
# kdr = KdrChannel(name='kdr', hypers=squid_hypers)
# squid_body.add_child(leak)
# body.add_child(na)
# squid_body.add_child(kdr)
# Initialize the model
D, I = squid_body.initialize_offsets()
# Set the recording duration
t_start = 0
t_stop = 600.
dt = 0.1
t = np.arange(t_start, t_stop, dt)
T = len(t)
inpt = np.zeros((T, I))
inpt[20/dt:40/dt,:] = 3.
inpt[120/dt:160/dt,:] = 5.
inpt[220/dt:280/dt,:] = 7.
inpt[300/dt:380/dt,:] = 9.
inpt[500/dt:599/dt,:] = 11.
inpt += np.random.randn(T, I)
# Set the initial distribution to be Gaussian around the steady state
z0 = np.zeros(D)
squid_body.steady_state(z0)
init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
# TODO: Fix the hack which requires us to know the number of particles
N = 100
sigmas = 0.0001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[squid_body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N, D, squid_body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([squid_body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.zeros((T,N,D))
z[0,0,:] = init.sample()
# Initialize the output matrix
x = np.zeros((T,D))
# Sample the latent state sequence
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.zeros((N,), dtype=np.int32))
# Sample observations
for i in np.arange(0,T):
lkhd.sample(z,x,i,0)
# Extract the first (and in this case only) particle
z = z[:,0,:].copy(order='C')
# Downsample
t_ds = 0.1
intvl = int(t_ds / dt)
td = t[::intvl].copy('C')
zd = z[::intvl, :].copy('C')
xd = x[::intvl, :].copy('C')
inptd = inpt[::intvl].copy('C')
st_axs = None
if(plot_progress):
# Plot the first particle trajectory
plt.ion()
st_axs, _ = squid_body.plot(td, zd, color='k')
# Plot the observed voltage
st_axs[0].plot(td, xd[:,0], 'r')
# plt.plot(t, x[:,0], 'r')
plt.show()
plt.pause(0.01)
return td, zd, xd, inptd, st_axs
def sample_gp_model():
body, gp1, gp2, D, I = create_gp_model()
# Set the recording duration
t_start = 0
t_stop = 100.
dt = 1.0
t = np.arange(t_start, t_stop, dt)
T = len(t)
# Make input with an injected current from 500-600ms
inpt = np.zeros((T, I))
inpt[50/dt:60/dt,:] = 7.
inpt += np.random.randn(T, I)
# Set the initial distribution to be Gaussian around the steady state
z0 = np.zeros(D)
body.steady_state(z0)
init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
sigmas = 0.0001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, 1, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.zeros((T,1,D))
z[0,0,:] = init.sample()
# Initialize the output matrix
x = np.zeros((T,D))
# Sample the latent state sequence
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Sample observations
for i in np.arange(0,T):
lkhd.sample(z,x,i,0)
# Extract the first (and in this case only) particle
z = z[:,0,:].copy(order='C')
st_axs = None
if(plot_progress):
# Plot the first particle trajectory
st_axs, _ = body.plot(t, z, color='k')
# Plot the observed voltage
st_axs[0].plot(t, x[:,0], 'r')
# Plot the GP channel dynamics
# gp1_fig = plt.figure()
# gp1_ax1 = gp1_fig.add_subplot(121)
# gp1.plot(ax=gp1_ax1)
# gp1_ax2 = gp1_fig.add_subplot(122)
#
# gp2_fig = plt.figure()
# gp2_ax1 = gp2_fig.add_subplot(121)
# gp2.plot(ax=gp2_ax1)
# gp2_ax2 = gp2_fig.add_subplot(122)
plt.ion()
plt.show()
plt.pause(0.01)
return t, z, x, inpt, st_axs
# Now run the pMCMC inference
def sample_z_given_x(t, x, inpt,
z0=None,
initialize='constant',
N_particles=1000,
N_samples=100,
axs=None, gp1_ax=None, gp2_ax=None):
dt = np.diff(t)
T,O = x.shape
# Make a model
body, gp1, gp2, D, I = create_gp_model()
# Set the initial distribution to be Gaussian around the steady state
ss = np.zeros(D)
body.steady_state(ss)
init = GaussianInitialDistribution(ss, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
# sigmas = np.ones(D)
sigmas = 0.2*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
# sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N_particles, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.ones((T,N_particles,D)) * ss[None, None, :] + np.random.randn(T,N_particles,D) * sigmas[None, None, :]
if z0 is not None:
if initialize == 'ground_truth':
logit = lambda zz: np.log(zz/(1-zz))
# Fix the observed voltage
z[:, 0, body.x_offset] = z0[:, 0]
# Fix the Na latent state
m = z0[:,1]
h = z0[:,2]
z[:,0, gp1.x_offset] = logit(np.clip(m**3 *h, 1e-4,1-1e-4))
# Fix the Kdr latent state
n = z0[:,3]
z[:,0, gp2.x_offset] = logit(np.clip(n**4, 1e-4, 1-1e-4))
else:
z[:,0,:] = z0
elif initialize == 'from_model':
# Sample the latent state sequence with the given initial condition
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Fix the observed voltage
z[i+1, 0, body.x_offset] = x[i+1, body.x_offset]
elif initialize == 'optimize':
# By default, optimize the latent state
# Set the voltage...
z[:, 0, body.x_offset] = x[:, body.x_offset]
# Set the initial latent trace
z[1:, 0, 1:] = initial_latent_trace(body, inpt, x[:, 0], t).transpose()
# Set the initial voltage
z[0, 0, 1:] = np.array([0, 0, 0])
else:
# Constant initialization
pass
# Initialize conductance values with MCMC to match the observed voltage...
# body.resample(t, z[:,0,:])
# resample_body(body, t, z[:,0,:], sigmas[0])
#
# if z0 is None:
# # Sample the latent state sequence with the given initial condition
# for i in np.arange(0,T-1):
# # The interface kinda sucks. We have to tell it that
# # the first particle is always its ancestor
# prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Resample the Gaussian processes
# gp1.resample(z[:,0,:], dt)
# gp2.resample(z[:,0,:], dt)
# Prepare the particle Gibbs sampler with the first particle
pf = ParticleGibbsAncestorSampling(T, N_particles, D)
pf.initialize(init, prop, lkhd, x, z[:,0,:].copy('C'))
if (plot_progress):
# Plot the initial state
gp1_ax, im1, l_gp1 = gp1.plot(ax=gp1_ax, data=z[:,0,:])
gp2_ax, im2, l_gp2 = gp2.plot(ax=gp2_ax, data=z[:,0,:])
axs, lines = body.plot(t, z[:,0,:], color='b', axs=axs)
axs[0].plot(t, x[:,0], 'r')
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
# Initialize sample outputs
z_smpls = np.zeros((N_samples,T,D))
z_smpls[0,:,:] = z[:,0,:]
gp1_smpls = []
gp2_smpls = []
# Resample observation noise
# eta_sqs = resample_observation_noise(z_smpls[0,:,:], x)
# lkhd.set_etasq(eta_sqs)
for s in range(1,N_samples):
print "Iteration %d" % s
# raw_input("Press enter to continue\n")
# Reinitialize with the previous particle
pf.initialize(init, prop, lkhd, x, z_smpls[s-1,:,:])
# Sample a new trajectory given the updated kinetics and the previous sample
z_smpls[s,:,:] = pf.sample()
# z_smpls[s,:,:] = z_smpls[s-1,:,:]
# print "dz: ", (z_smpls[s,:,:] - z_smpls[s-1,:,:]).sum(0)
# Resample the GP
gp1.resample(z_smpls[s,:,:], dt)
gp2.resample(z_smpls[s,:,:], dt)
# Resample the noise levels
sigmasq = resample_transition_noise(body, z_smpls[s,:,:], inpt, t)
# HACK: Fix the voltage transition noise
# sigmasq[0] = 0.5
print "Sigmasq: ", sigmasq
# prop.set_sigmasq(sigmasq)
gp1.set_sigmas(sigmasq)
gp2.set_sigmas(sigmasq)
# gp1.resample_transition_noise(z_smpls[s, :, :], t)
# gp2.resample_transition_noise(z_smpls[s, :, :], t)
# eta_sqs = resample_observation_noise(z_smpls[s,:,:], x)
# lkhd.set_etasq(eta_sqs)
# Resample the conductances
# resample_body(body, t, z_smpls[s,:,:], sigmas[0])
if(plot_progress):
# Plot the sample
body.plot(t, z_smpls[s,:,:], lines=lines)
gp1.plot(im=im1, l=l_gp1, data=z_smpls[s,:,:])
gp2.plot(im=im2, l=l_gp2, data=z_smpls[s,:,:])
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
gp1_smpls.append(gp1.gps)
gp2_smpls.append(gp2.gps)
freq = 1
if s % freq == 0:
with open('squid' + str(seed) + '_results' + str(s / freq) + '.pkl', 'w') as f:
cPickle.dump((z_smpls, gp1_smpls, gp2_smpls), f, protocol=-1)
if(s / freq > 1):
os.remove('squid' + str(seed) + '_results' + str((s / freq) - 1) + '.pkl')
z_mean = z_smpls.mean(axis=0)
z_std = z_smpls.std(axis=0)
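    # Trace a closed +/-1 std envelope around the mean voltage: forward in
    # time along mean+std, then back along mean-std (e.g. for a fill plot).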
z_env = np.zeros((T*2,2))
z_env[:,0] = np.concatenate((t, t[::-1]))
z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))
if(plot_progress):
plt.ioff()
plt.show()
return z_smpls, gp1_smpls, gp2_smpls
def resample_transition_noise(body, data, inpt, t,
alpha0=100, beta0=100):
"""
Resample sigma, the transition noise variance, under an inverse gamma prior
"""
# import pdb; pdb.set_trace()
Xs = []
X_preds = []
X_diffs = []
T = data.shape[0]
D = data.shape[1]
dxdt = np.zeros((T,1,D))
x = np.zeros((T,1,D))
x[:,0,:] = data
# Compute kinetics of the voltage
body.kinetics(dxdt, x, inpt, np.arange(T-1).astype(np.int32))
dt = np.diff(t)
# TODO: Loop over data
dX_pred = dxdt[:-1, 0, :]
dX_data = (data[1:, :] - data[:-1, :]) / dt[:,None]
X_diffs = dX_pred - dX_data
# Resample transition noise.
X_diffs = np.array(X_diffs)
n = X_diffs.shape[0]
sigmasq = np.zeros(D)
for d in range(D):
alpha = alpha0 + n / 2.0
beta = beta0 + np.sum(X_diffs[:,d] ** 2) / 2.0
# self.sigmas[d] = beta / alpha
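        # Draw sigma^2 ~ InvGamma(alpha, beta) by inverting a
        # Gamma(alpha, scale=1/beta) sample (the conjugate posterior update).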
sigmasq[d] = 1.0 / np.random.gamma(alpha, 1.0/beta)
# print "Sigma V: %.3f" % (sigmas[d])
return sigmasq
def resample_observation_noise(z, x,
alpha0=1.0, beta0=1.0):
"""
    Resample eta, the observation noise variance, under an inverse gamma prior
"""
# TODO: Iterate over obs dimensions. For now assume 1d
V_pred = z[:,0]
V_data = x[:,0]
V_diff = V_pred - V_data
# Resample transition noise.
n = V_diff.shape[0]
alpha = alpha0 + n / 2.0
beta = beta0 + np.sum(V_diff ** 2) / 2.0
etasq = 1.0 / np.random.gamma(alpha, 1.0/beta)
print "eta V: %.3f" % (etasq)
return np.array([etasq])
from hips.inference.mh import mh
def resample_body(body, ts=[], datas=[], sigma=1.0):
"""
Resample the conductances of this neuron.
"""
assert isinstance(datas, list) or isinstance(datas, np.ndarray)
if isinstance(datas, np.ndarray):
datas = [datas]
if isinstance(ts, np.ndarray):
ts = [ts]
Is = []
dV_dts = []
# Compute I and dV_dt for each dataset
for t,data in zip(ts, datas):
# Compute dV dt
T = data.shape[0]
V = data[:,body.x_offset]
dV_dt = (V[1:] - V[:-1])/(t[1:] - t[:-1])
dV_dts.append(dV_dt[:,None])
# Compute the (unscaled) currents through each channel
I = np.empty((T-1, len(body.children)))
for m,c in enumerate(body.children):
for i in range(T-1):
I[i,m] = c.current(data[:,None,:].copy('C'), V[i], i, 0)
Is.append(I)
# Concatenate values from all datasets
dV_dt = np.vstack(dV_dts)
I = np.vstack(Is)
# Now do a nonnegative regression of dVdt onto I
gs = 0.1 * np.ones(len(body.children))
perm = np.random.permutation(len(body.children))
# Define a helper function to compute the log likelihood and make MH proposals
def _logp(m, gm):
gtmp = gs.copy()
gtmp[m] = gm
dV_dt_pred = I.dot(gtmp)
return (-0.5/sigma * (dV_dt_pred - dV_dt)**2).sum()
# Define a metropolis hastings proposal
def _q(x0, xf):
lx0, lxf = np.log(x0), np.log(xf)
return -0.5 * (lx0-lxf)**2
def _sample_q(x0):
lx0 = np.log(x0)
xf = np.exp(lx0 + np.random.randn())
return xf
# Sample each channel in turn
for m in perm:
gs[m] = mh(gs[m], lambda g: _logp(m, g), _q, _sample_q, steps=10)[-1]
for c,g in zip(body.children, gs):
c.g = g
print "Gs: ", gs
def initial_latent_trace(body, inpt, voltage, t):
I_true = np.diff(voltage) * body.C
T = I_true.shape[0]
gs = np.diag([c.g for c in body.children])
D = int(sum([c.D for c in body.children]))
driving_voltage = np.dot(np.ones((len(body.children), 1)), np.array([voltage]))[:, :T]
child_i = 0
for i in range(D):
driving_voltage[i, :] = voltage[:T] - body.children[child_i].E
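    # Build a squared-exponential (RBF) Gram matrix over time indices:
    # K.T + K gives |i - j|, so after negating the square and exponentiating,
    # K[i, j] = exp(-(i - j)**2 / 2).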
K = np.array([[max(i-j, 0) for i in range(T)] for j in range(T)])
K = K.T + K
K = -1*(K ** 2)
K = np.exp(K / 2)
L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
Linv = scipy.linalg.solve_triangular(L.transpose(), np.identity(K.shape[0]))
N = 1
batch_size = 5000
learn = .0000001
runs = 10000
batcher = kayak.Batcher(batch_size, N)
inputs = kayak.Parameter(driving_voltage)
targets = kayak.Targets(np.array([I_true]), batcher)
g_params = kayak.Parameter(gs)
I_input = kayak.Parameter(inpt.T[:, :T])
Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))
initial_latent = np.random.randn(D, T)
latent_trace = kayak.Parameter(initial_latent)
sigmoid = kayak.Logistic(latent_trace)
quadratic = kayak.ElemMult(
sigmoid,
kayak.MatMult(
kayak.Parameter(np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])),
sigmoid
)
)
three_quadratic = kayak.MatMult(
kayak.Parameter(np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0]])),
quadratic
)
linear = kayak.MatMult(
kayak.Parameter(np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 1]])),
sigmoid
)
leak_open = kayak.Parameter(np.vstack((np.ones((1, T)), np.ones((2, T)))))
open_fractions = kayak.ElemAdd(leak_open, kayak.ElemAdd(three_quadratic, linear))
I_channels = kayak.ElemMult(
kayak.MatMult(g_params, inputs),
open_fractions
)
I_ionic = kayak.MatMult(
kayak.Parameter(np.array([[1, 1, 1]])),
I_channels
)
predicted = kayak.MatAdd(I_ionic, I_input)
nll = kayak.ElemPower(predicted - targets, 2)
hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
kayak.Reshape(
kayak.MatMult(
kayak.MatMult(latent_trace, Kinv),
kayak.Transpose(latent_trace)
),
(9,)
),
hack_vec
) + kayak.MatSum(kayak.ElemPower(I_channels, 2))
grad = kyk_loss.grad(latent_trace)
for ii in xrange(runs):
for batch in batcher:
loss = kyk_loss.value
if ii % 100 == 0:
print ii, loss, np.sum(np.power(predicted.value - I_true, 2)) / T
grad = kyk_loss.grad(latent_trace) + .5 * grad
latent_trace.value -= learn * grad
return sigmoid.value
# Sample data from either a GP model or a squid compartment
# t, z, x, inpt, st_axs = sample_gp_model()
t, z, x, inpt, st_axs = sample_squid_model()
with open('squid_' + str(seed) + '_ground.pkl', 'w') as f:
cPickle.dump((t, z, x, inpt), f)
# raw_input("Press enter to being sampling...\n")
# sample_z_given_x(t, x, inpt, z0=z, axs=st_axs)
z_smpls, gp1_smpls, gp2_smpls = sample_z_given_x(t, x, inpt, N_samples=1000, axs=st_axs, initialize='optimize')
# sample_z_given_x(t, x, inpt, axs=st_axs, z0=z, initialize='ground_truth')
# sample_z_given_x(t, x, inpt, axs=st_axs, initialize='optimize')
with open('squid_' + str(seed) + '_results.pkl', 'w') as f:
cPickle.dump((z_smpls, gp1_smpls, gp2_smpls), f)
|
HIPS/optofit
|
examples/two_gp_demo.py
|
Python
|
gpl-2.0
| 21,174
|
[
"Gaussian",
"NEURON"
] |
8141f95523d973a1274aa41a46328bc953adcd03a38cddcba80543f819bad7f2
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 20 08:58:56 2014
Script to generate intro-example.odml
@author: zehl
"""
import odml
import datetime
odmlrepo = 'http://portal.g-node.org/odml/terminologies/v1.0/terminologies.xml'
# CREATE A DOCUMENT
doc = odml.Document(author="D. N. Adams",
date=datetime.date(1979, 10, 12),
version=42)
# repository=odmlrepo)
# CREATE AND APPEND THE MAIN SECTIONs
doc.append(odml.Section(name="TheCrew",
definition="Information on the crew",
type="crew"))
doc.append(odml.Section(name="TheStarship",
definition="Information on the crew",
type="crew"))
# SET NEW PARENT NODE
parent = doc['TheCrew']
# APPEND SUBSECTIONS
parent.append(odml.Section(name="Arthur Philip Dent",
type="crew/person",
definition="Information on Arthur Dent"))
parent.append(odml.Section(name="Zaphod Beeblebrox",
type="crew/person",
definition="Information on Zaphod Beeblebrox"))
parent.append(odml.Section(name="Tricia Marie McMillan",
type="crew/person",
definition="Information on Trillian Astra"))
parent.append(odml.Section(name="Ford Prefect",
type="crew/person",
definition="Information on Ford Prefect"))
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="NameCrewMembers",
value=[odml.Value(data="Arthur Philip Dent",
dtype=odml.DType.person),
odml.Value(data="Zaphod Beeblebrox",
dtype=odml.DType.person),
odml.Value(data="Tricia Marie McMillan",
dtype=odml.DType.person),
odml.Value(data="Ford Prefect",
dtype=odml.DType.person)],
definition="List of crew members names"))
parent.append(odml.Property(name="NoCrewMembers",
value=odml.Value(data=4,
dtype=odml.DType.int),
definition="Number of crew members"))
# SET NEW PARENT NODE
parent = doc['TheCrew']['Arthur Philip Dent']
# APPEND SUBSECTIONS
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Species",
value=odml.Value(data="Human",
dtype=odml.DType.string),
definition="Species to which subject belongs to"))
parent.append(odml.Property(name="Nickname",
value=odml.Value(data="The sandwich-maker",
dtype=odml.DType.string),
definition="Nickname(s) of the subject"))
parent.append(odml.Property(name="Occupation",
value=odml.Value(data="-",
dtype=odml.DType.string),
definition="Occupation of the subject"))
parent.append(odml.Property(name="Gender",
value=odml.Value(data="male",
dtype=odml.DType.string),
definition="Sex of the subject"))
parent.append(odml.Property(name="HomePlanet",
value=odml.Value(data="Earth",
dtype=odml.DType.string),
definition="Home planet of the subject"))
# SET NEW PARENT NODE
parent = doc['TheCrew']['Zaphod Beeblebrox']
# APPEND SUBSECTIONS
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Species",
value=odml.Value(data="Betelgeusian",
dtype=odml.DType.string),
definition="Species to which subject belongs to"))
parent.append(odml.Property(name="Nickname",
value=odml.Value(data="-",
dtype=odml.DType.string),
definition="Nickname(s) of the subject"))
parent.append(odml.Property(name="Occupation",
value=odml.Value(data="Ex-Galactic President",
dtype=odml.DType.string),
definition="Occupation of the subject"))
parent.append(odml.Property(name="Gender",
value=odml.Value(data="male",
dtype=odml.DType.string),
definition="Sex of the subject"))
parent.append(odml.Property(name="HomePlanet",
value=odml.Value(data="A planet in the vicinity "
"of Betelgeuse",
dtype=odml.DType.string),
definition="Home planet of the subject"))
# SET NEW PARENT NODE
parent = doc['TheCrew']['Tricia Marie McMillan']
# APPEND SUBSECTIONS
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Species",
value=odml.Value(data="Betelgeusian",
dtype=odml.DType.string),
definition="Species to which subject belongs to"))
parent.append(odml.Property(name="Nickname",
value=odml.Value(data="Trillian Astra",
dtype=odml.DType.string),
definition="Nickname(s) of the subject"))
parent.append(odml.Property(name="Occupation",
value=odml.Value(data="-",
dtype=odml.DType.string),
definition="Occupation of the subject"))
parent.append(odml.Property(name="Gender",
value=odml.Value(data="female",
dtype=odml.DType.string),
definition="Sex of the subject"))
parent.append(odml.Property(name="HomePlanet",
value=odml.Value(data="Earth",
dtype=odml.DType.string),
definition="Home planet of the subject"))
# SET NEW PARENT NODE
parent = doc['TheCrew']['Ford Prefect']
# APPEND SUBSECTIONS
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Species",
value=odml.Value(data="Betelgeusian",
dtype=odml.DType.string),
definition="Species to which subject belongs to"))
parent.append(odml.Property(name="Nickname",
value=odml.Value(data="Ix",
dtype=odml.DType.string),
definition="Nickname(s) of the subject"))
parent.append(odml.Property(name="Occupation",
value=odml.Value(data="Researcher for the "
"Hitchhiker's Guide to the "
"Galaxy",
dtype=odml.DType.string),
definition="Occupation of the subject"))
parent.append(odml.Property(name="Gender",
value=odml.Value(data="male",
dtype=odml.DType.string),
definition="Sex of the subject"))
parent.append(odml.Property(name="HomePlanet",
value=odml.Value(data="A planet in the vicinity "
"of Betelgeuse",
dtype=odml.DType.string),
definition="Home planet of the subject"))
# SET NEW PARENT NODE
parent = doc['TheStarship']
# APPEND SUBSECTIONS
parent.append(odml.Section(name='Cybernetics',
type="starship/cybernetics",
definition="Information on cybernetics present on "
"the ship"))
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Name",
value=odml.Value(data="Heart of Gold",
dtype=odml.DType.string),
definition="Name of person/device"))
parent.append(odml.Property(name="OwnerStatus",
value=odml.Value(data="stolen",
dtype=odml.DType.string),
definition="Owner status of device"))
parent.append(odml.Property(name="DriveType",
value=odml.Value(data="Infinite Propability Drive",
dtype=odml.DType.string),
definition="Type of drive"))
parent.append(odml.Property(name="Technology",
value=odml.Value(data="secret",
dtype=odml.DType.string),
definition="Technology used to built device"))
parent.append(odml.Property(name="Length",
value=odml.Value(data=150.00,
dtype=odml.DType.float,
unit='m'),
definition="Length of device"))
parent.append(odml.Property(name="Shape",
value=odml.Value(data="various",
dtype=odml.DType.string),
definition="Shape of device"))
parent.append(odml.Property(name="FactoryPlanet",
value=odml.Value(data="Damogran",
dtype=odml.DType.string),
definition="Planet where device was constructed"))
# SET NEW PARENT NODE
parent = doc['TheStarship']['Cybernetics']
# APPEND SUBSECTIONS
parent.append(odml.Section(name='Marvin',
type="starship/cybernetics",
definition="Information on Marvin"))
parent.append(odml.Section(name='Eddie',
type="starship/cybernetics",
definition="Information on Eddie"))
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="RobotType",
value=odml.Value(data="Genuine People "
"Personalities",
dtype=odml.DType.string),
definition="Type of robots"))
parent.append(odml.Property(name="Manufacturer",
value=odml.Value(data="Sirius Cybernetics "
"Corporation",
dtype=odml.DType.string),
definition="Manufacturer of robots"))
parent.append(odml.Property(name="NoOfCybernetics",
value=odml.Value(data=2,
dtype=odml.DType.int),
definition="Number of cybernetic robots on the "
"ship"))
homedir = "/home/zehl/Projects/toolbox/"
save_to = homedir + "/python-odml/doc/example_odMLs/THGTTG.odml"
odml.tools.xmlparser.XMLWriter(doc).write_file(save_to)
|
carloscanova/python-odml
|
doc/example_odMLs/thgttg.py
|
Python
|
bsd-3-clause
| 11,921
|
[
"Galaxy"
] |
3043ac62ce4bf6fcf10553aa031c65db5bd0ff05c0f2b80c359dd747d0126a2e
|
#How to extract histogram: http://stackoverflow.com/questions/22159160/python-calculate-histogram-of-image
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
import cv2
from skimage.color import rgb2gray
from skimage import exposure
import skimage.io as io
import numpy as np
import collections
train_file = '../images/sampling/train-imgs.txt'
test_file = '../images/sampling/test-imgs.txt'
Dataset = collections.namedtuple('Dataset', ['data', 'target'], verbose=True)
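# Build a Dataset of per-image intensity histograms. Each line of the input
# file is expected to be "<filename> <label>".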
def loaddataset(data_file):
    n_samples = sum(1 for line in open(data_file))
    with open(data_file, "r") as ins:
n_features = 255
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
i = 0
for line in ins:
line = line.rstrip().split()
filename = line[0]
im = io.imread(filename)
img_gray = rgb2gray(im)
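            # NOTE: rgb2gray returns floats in [0, 1] for RGB input, so the
            # 0-255 bins below may require rescaling the image first
            # (e.g. img_gray * 255) to obtain a meaningful histogram.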
counts, bins = np.histogram(img_gray, range(256))
data[i] = np.asarray(counts, dtype=np.float64)
target[i] = np.asarray(line[1], dtype=np.int)
i = i+1
return Dataset(data=data, target=target)
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.mixture import BayesianGaussianMixture
from sklearn.linear_model import LogisticRegression
dataset = loaddataset(train_file)
testset = loaddataset(test_file)
names = ["Nearest Neighbors", "RBF SVM",
"Decision Tree", "Random Forest", "AdaBoost",
]
ab=AdaBoostClassifier(random_state=1)
bgm=BayesianGaussianMixture(random_state=1)
dt=DecisionTreeClassifier(random_state=1)
gb=GradientBoostingClassifier(random_state=1)
lr=LogisticRegression(random_state=1)
rf=RandomForestClassifier(random_state=1)
classifiers = [
KNeighborsClassifier(3),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
RandomForestClassifier(random_state=1),
GaussianNB(),
QuadraticDiscriminantAnalysis()
]
svcl=LinearSVC(random_state=1)
svcg=SVC(random_state=1)
gnb=GaussianNB()  # GaussianNB takes no positional arguments
qda=QuadraticDiscriminantAnalysis()  # priors, if any, must be passed by keyword
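# NOTE: `names`/`classifiers` are reassigned below; only the final pair
# (gp/nb/qda) is used by the cross-validation loop.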
names=['svcl','lr']
classifiers=[svcl,lr]
params = [
{'C':[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]},
{'C':[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]}
]
names=['gp','nb','qda']
classifiers=[GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True, random_state=1), GaussianNB(), QuadraticDiscriminantAnalysis()]
for name, clf in zip(names, classifiers):
scores = cross_val_score(clf, dataset.data, dataset.target, cv=5)
print("%s, accuracy: %0.4f (+/- %0.4f)" % (name, scores.mean(), scores.std() * 2))
ab=AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=0.7, n_estimators=100, random_state=1)
dt=DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=10, max_features=75, max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=1, splitter='best')
gb=GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.1, loss='deviance', max_depth=5, max_features=None, max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, presort='auto', random_state=1, subsample=1.0, verbose=0, warm_start=False)
rf=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=10, max_features=10, max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=1, oob_score=False, random_state=1, verbose=0, warm_start=False)
clfs = [('ab',ab),('dt',dt),('gb',gb),('rf',rf)]
##Nearest Neighbors, accuracy: 0.91 (+/- 0.01)
##RBF SVM, accuracy: 0.90 (+/- 0.03)
#Decision Tree, accuracy: 0.91 (+/- 0.02)
#Random Forest, accuracy: 0.91 (+/- 0.02)
#AdaBoost, accuracy: 0.91 (+/- 0.02)
#Naive Bayes, accuracy: 0.61 (+/- 0.03)
#QDA, accuracy: 0.62 (+/- 0.02)
#Linear SVM, accuracy: 0.85 (+/- 0.05)
#Gaussian Process, accuracy: 0.80 (+/- 0.19)
#Neural Net, accuracy: 0.74 (+/- 0.35)
|
jmrozanec/white-bkg-classification
|
scripts/05-histogram-random-forest.py
|
Python
|
apache-2.0
| 5,046
|
[
"Gaussian"
] |
1723bd91365e199fe3932da971014a092f7e10870da1451659af7b700309c665
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abinit Post Process Application
author: Martin Alexandre
last edited: May 2013
"""
import sys,os,commands,threading
import string, math, re
#GUI
import gui.graph as Graph
import gui.md as MD
import gui.gs as GS
import gui.elasticConstant as ElasticConstant
import gui.conv as Conv
import gui.about as About
import gui.loading as load
#Reading
import reading.read as Read
#Utility
import utility.write as Write
import utility.thread as thread
import utility.global_variable as var
from PyQt4 import Qt,QtGui,QtCore
import numpy as np
#---------------------------------------------------#
#---------------------------------------------------#
#----------------MAIN FRAME-------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
class MainFrame(QtGui.QMainWindow):
def __init__(self):
super(MainFrame, self).__init__()
self.conv = Conv.Conversion()
self.connect(self.conv, QtCore.SIGNAL("myCustomizedSignal(PyQt_PyObject)"), self.changeUnits)
self.units = self.conv.getUnits()
self.initUI()
def initUI(self):
#----------MainFrame parameters----------#
self.setWindowTitle("APPA "+var.version)
self.setFixedSize(700, 550)
self.center()
#-------------------------------------#
#---------------Creation of menubar----------------------------------#
self.open = QtGui.QAction( '&Open', self)
self.open.setShortcut('Ctrl+O')
self.open.setStatusTip('Open File')
self.connect(self.open, QtCore.SIGNAL('triggered()'), self.showDialog)
self.close = QtGui.QAction('&Exit', self)
self.close.setShortcut('Ctrl+Q')
self.close.setStatusTip('Exit application')
self.connect(self.close, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
self.save = QtGui.QAction('&Save', self)
self.save.setShortcut('Ctrl+S')
self.save.setStatusTip('Save simulation data')
self.connect(self.save, QtCore.SIGNAL('triggered()'), self.showSave)
self.export = QtGui.QAction('E&xport (.xyz)', self)
self.export.setShortcut('Ctrl+X')
self.export.setStatusTip('Export data to XYZ file')
self.connect(self.export, QtCore.SIGNAL('triggered()'), self.showExport)
self.menubar = self.menuBar()
self.fileMenu1 = self.menubar.addMenu('&File')
self.fileMenu1.addAction(self.open)
self.fileMenu1.addAction(self.save)
self.fileMenu1.addAction(self.export)
self.fileMenu1.addAction(self.close)
self.ec = QtGui.QAction( '&Elastics constants', self)
self.ec.setShortcut('Ctrl+E')
self.ec.setStatusTip('Calculation of Elastics constants')
self.connect(self.ec, QtCore.SIGNAL('triggered()'), self.showElastics)
self.fileMenu2 = self.menubar.addMenu('&Calculation')
self.fileMenu2.addAction(self.ec)
self.unit = QtGui.QAction( '&Units', self)
self.unit.setShortcut('Ctrl+U')
self.unit.setStatusTip('change physical units')
self.connect(self.unit, QtCore.SIGNAL('triggered()'), self.showConv)
self.fileMenu3 = self.menubar.addMenu('&Option')
self.fileMenu3.addAction(self.unit)
self.about = QtGui.QAction( '&About', self)
self.about.setShortcut('Ctrl+A')
self.about.setStatusTip('About software')
self.connect(self.about, QtCore.SIGNAL('triggered()'), self.showAbout)
self.fileMenu3 = self.menubar.addMenu('&APPA')
self.fileMenu3.addAction(self.about)
#---------------------------------------------------------------------#
#----------------Creation of statusBar--------------------#
self.setStatusBar(QtGui.QStatusBar())
#---------------------------------------------------------#
#-------Creation of CentralWidget-----------------------------------------#
self.widget = QtGui.QWidget()
self.widget_layout = QtGui.QGridLayout()
self.widget.setLayout(self.widget_layout)
self.box1 = QtGui.QGroupBox()
self.box1layout = QtGui.QGridLayout()
self.box1.setLayout(self.box1layout)
self.lbltitle = QtGui.QLabel("Abinit Post-Process Application")
self.lbltitle.setFont(QtGui.QFont("calibri", 25))
self.lbltitle.setFixedWidth(520);
self.box1layout.addWidget(self.lbltitle,1,0)
self.tab = QtGui.QTabWidget()
self.tab.setTabsClosable (True)
self.connect(self.tab,QtCore.SIGNAL('tabCloseRequested (int)'),self.closeTab)
self.tab.setTabPosition(1)
#----------Try to open the last .nc and .HIST files------------#
MD_file = Read.MolecularDynamicFile("")
if MD_file.isGoodFile():
self.page1 = MD.Netcdf_MD(MD_file,self.units)
self.tab.addTab(self.page1,MD_file.getNameFile())
#----------Try to open the last Ground State file(BETA)--------#
#GS_file = Read.outputFile("")
#if GS_file.isGoodFile():
# self.page2 = GS.Ouput_GS(GS_file,self.units)
# self.tab.addTab(self.page2,str(GS_file.getNameFile()))
#Connection of Signal (for the threading):
self.connect(self,QtCore.SIGNAL("Reading(PyQt_PyObject)"),self.add)
self.widget_layout.addWidget(self.box1,1,0,1,2)
self.widget_layout.addWidget(self.tab,2,0,5,2)
self.setCentralWidget(self.widget)
#------------------------------------------------------------------------#
self.show()
if self.tab.count() == 0:
self.showDialog()
#----------------------------------Methods---------------------------------------------#
def showDialog(self):
path = QtGui.QFileDialog.getOpenFileName(self, 'Open file', var.path(), "FILE (*_HIST *_OUT.nc *.out*)")
pathFile=str(path)
var.global_path = pathFile
del path
if pathFile !="":
if pathFile.find(' ') != -1 :
                #Sometimes the space character in the pathfile has to be replaced by '\ '
if os.path.exists(pathFile.replace(' ','\ ')):
pathFile = pathFile.replace(' ','\ ')
            #Check the existence of both NetCDF files before reading:
            if ( pathFile.find('_OUT.nc') != -1 and os.path.exists(pathFile.replace('_OUT.nc','_HIST')) )\
            or ( pathFile.find('_HIST') != -1 and os.path.exists(pathFile.replace('_HIST','_OUT.nc')) ):
                read_thread = threading.Thread(target=self.read, args=(pathFile,))
                read_thread.setDaemon(True)
                read_thread.start()
self.progressbar = load.loading(message="Reading output")
self.progressbar.show()
self.progressbar.raise_()
return
#Read the ASCII FILE:
elif (pathFile.find('.out') != -1):
                read_thread = threading.Thread(target=self.read, args=(pathFile,))
                read_thread.setDaemon(True)
                read_thread.start()
self.progressbar = load.loading(message="Reading output")
self.progressbar.show()
self.progressbar.raise_()
return
#------------BETA------------#
# TEMP = Read.outputFile(pathFile)
# if TEMP.isGoodFile():
# self.GS_page = GS.Ouput_GS(TEMP,self.units)
# self.tab.addTab(self.GS_page,str(TEMP.getNameFile()))
# self.tab.setCurrentIndex(self.tab.indexOf(self.GS_page))
# return
else:
self.showError("This file can't be read by APPA")
def read(self,pathFile):
TEMP = Read.MolecularDynamicFile(pathFile)
if TEMP.isGoodFile():
self.emit(QtCore.SIGNAL("Reading(PyQt_PyObject)"), TEMP)
try:
del self.progressbar
except:
pass;
def add(self,pfile):
MD_page = MD.Netcdf_MD(pfile,self.units)
self.tab.addTab(MD_page,str(pfile.getNameFile()))
self.tab.setCurrentIndex(self.tab.indexOf(MD_page))
return
def showElastics(self):
self.page3 = ElasticConstant.Elastic()
self.tab.addTab(self.page3,"Elastic constant")
self.tab.setCurrentIndex(self.tab.indexOf(self.page3))
def showAbout(self):
self.aboutPage = About.About()
self.aboutPage.raise_()
def showSave(self):
fname = QtGui.QFileDialog.getSaveFileName(self,"Save Graphics",os.getcwd(), "FILE")
if (fname !=""):
print 'test'+fname
try:
Write.SaveFile(fname).saveData(self.tab.currentWidget().getData())
except:
self.showError("This file is not correct or no file open")
def showExport(self):
try:
fname = QtGui.QFileDialog.getSaveFileName(self,"Export data",os.getcwd(), "XYZ file (*.xyz)")
if (fname !=""):
if 'xyz' in fname.split('.'):
pass
else:
fname += '.xyz'
pos = (self.tab.currentWidget().getFile()).getXCart() * 0.5291772085936 # Angstrom
acell = (self.tab.currentWidget().getFile()).getAcell() * 0.5291772085936 # Angstrom
typat = (self.tab.currentWidget().getFile()).getTypat()
znucl = (self.tab.currentWidget().getFile()).getZnucl()
Write.SaveFile(fname).xyzFormat(pos,acell,typat,znucl)
except:
self.showError("This file is not molecular dynamics file")
def showError(self,perror):
QtGui.QMessageBox.critical(self,"Warning",perror)
def showConv(self):
self.conv.showUnits()
self.conv.raise_()
def changeUnits(self,punits):
self.units = punits
for i in range(self.tab.count()):
self.tab.widget(i).updateUnits(self.units)
def closeTab(self,index):
reply = QtGui.QMessageBox.question(self, 'Warning',
"Are you sure you want to close?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
try:
self.tab.currentWidget().restart()
self.tab.currentWidget().closeGraphic()
except:
pass
self.tab.removeTab(index)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Warning',
"Are you sure you want to quit?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
sys.exit(0)
event.accept()
else:
event.ignore()
def center(self):
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
#----------------------------------------------------------------------------------------------------#
|
SamKChang/abinit-7.10.5_multipole
|
scripts/post_processing/appa/gui/main_frame.py
|
Python
|
gpl-3.0
| 11,569
|
[
"ABINIT",
"NetCDF"
] |
1ad3cadcf801918235b771ccde74e3ed724ede8b82b61ae812f21812938b6e95
|
#!/usr/bin/env python
""" update local cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
from diraccfg import CFG
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [options]' % Script.scriptName]))
Script.registerSwitch('F:', 'file=', "set the cfg file to update.")
Script.registerSwitch('V:', 'vo=', "set the VO.")
Script.registerSwitch('S:', 'setup=', "set the setup.")
Script.registerSwitch('D:', 'softwareDistModule=', "set the software dist module to update.")
Script.parseCommandLine()
args = Script.getPositionalArgs()
from DIRAC import gConfig
cFile = ''
sMod = ''
vo = ''
setup = ''
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("F", "file"):
cFile = unprocSw[1]
if unprocSw[0] in ("V", "vo"):
vo = unprocSw[1]
if unprocSw[0] in ("D", "softwareDistModule"):
sMod = unprocSw[1]
if unprocSw[0] in ("S", "setup"):
setup = unprocSw[1]
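# Load the local dirac.cfg: either the file passed via -F/--file or one of
# the standard Jenkins install locations probed below.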
localCfg = CFG()
if cFile:
localConfigFile = cFile
else:
print("WORKSPACE: %s" % os.path.expandvars('$WORKSPACE'))
if os.path.isfile(os.path.expandvars('$WORKSPACE') + '/PilotInstallDIR/etc/dirac.cfg'):
localConfigFile = os.path.expandvars('$WORKSPACE') + '/PilotInstallDIR/etc/dirac.cfg'
elif os.path.isfile(os.path.expandvars('$WORKSPACE') + '/ServerInstallDIR/etc/dirac.cfg'):
localConfigFile = os.path.expandvars('$WORKSPACE') + '/ServerInstallDIR/etc/dirac.cfg'
elif os.path.isfile('./etc/dirac.cfg'):
localConfigFile = './etc/dirac.cfg'
else:
print("Local CFG file not found")
exit(2)
localCfg.loadFromFile(localConfigFile)
if not localCfg.isSection('/LocalSite'):
localCfg.createNewSection('/LocalSite')
localCfg.setOption('/LocalSite/CPUTimeLeft', 5000)
localCfg.setOption('/DIRAC/Security/UseServerCertificate', False)
if not sMod:
if not setup:
setup = gConfig.getValue('/DIRAC/Setup')
if not setup:
setup = 'dirac-JenkinsSetup'
if not vo:
vo = gConfig.getValue('/DIRAC/VirtualOrganization')
if not vo:
vo = 'dirac'
if not localCfg.isSection('/DIRAC/VOPolicy'):
localCfg.createNewSection('/DIRAC/VOPolicy')
if not localCfg.isSection('/DIRAC/VOPolicy/%s' % vo):
localCfg.createNewSection('/DIRAC/VOPolicy/%s' % vo)
if not localCfg.isSection('/DIRAC/VOPolicy/%s/%s' % (vo, setup)):
localCfg.createNewSection('/DIRAC/VOPolicy/%s/%s' % (vo, setup))
localCfg.setOption('/DIRAC/VOPolicy/%s/%s/SoftwareDistModule' % (vo, setup), '')
localCfg.writeToFile(localConfigFile)
|
yujikato/DIRAC
|
tests/Jenkins/dirac-cfg-update.py
|
Python
|
gpl-3.0
| 2,689
|
[
"DIRAC"
] |
ac702c3833c0ef124243b12f1486bef88eb3a60bd591a389cb086003994d5cb8
|
"""Takes in grades from the user then prints out relavent data.
Written By: Brian O'Dell, October 2017
The program will take in grades from the user until they enter '-1'.
Once that occurs the while loop will break out, and the frequency of each
category will be displayed. The categories are defined by [x0 - x9] where x
is the most significant digit in the grade. The only exception is in the last
category [90 - 100].
This code is written so that it will throw an exception to handle all execution
When '-1' is entered a RuntimeError exception is raised and then caught.
From there the loop will break exectution.
When any other value is entered a ValueError exception is thrown. When it is
caught all the array operations will occur.
Finally, In the case where the grade is >= 100. The index will become 10,
which is out of the array bounds. This triggers and IndexError exception.
It is caught for the special case of 100, and assigned to index 9.
Otherwise, the input will not be entered and the user will have to enter
it again.
"""
def main():
"""Get the grades and then print the frequencies of each category."""
Freq = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
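    # Freq[i] counts grades in [10*i, 10*i + 9]; a grade of exactly 100 is
    # folded into Freq[9].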
while(True):
try:
print("Enter a grade")
gradeVal = int(input())
if(gradeVal < 0):
raise RuntimeError("Negative number used")
raise ValueError()
except ValueError:
print("ValueError Exception -- Doing the basic operations "
+ "from inside the exception")
index = int(gradeVal / 10)
try:
Freq[index] = Freq[index] + 1
except IndexError:
print("IndexError Exception -- Handling the case where "
+ "the grade is >= 100")
if(gradeVal == 100):
Freq[9] = Freq[9] + 1
else:
print("Error -- new grade:" + str(gradeVal)
+ " is out of range")
except RuntimeError:
print("RuntimeError Exception -- "
+ "A negative number was entered, exiting loop")
break
"""
We want to print the frequencies in the following format:
0 9 Freq: i
10 19 Freq: i
...
"""
    for x in range(10):
        if(x != 9):
            print("{0:2d} {1:3d} Freq: {2:3d}".format(
                (10*x), (10*x + 9), (Freq[x])))
        else:
            # special case: the last category spans 90 - 100
            print("{0:2d} {1:3d} Freq: {2:3d}".format(
                (10*x), (10*x + 10), (Freq[x])))
if __name__ == "__main__":
main()
|
brian-o/CS-CourseWork
|
CS471/ExceptionHandling/exceptionInPython.py
|
Python
|
gpl-3.0
| 2,495
|
[
"Brian"
] |
c3568eac445fd6d0e358d740435e0a595a264727d0d93243e5745db035ac6db1
|
from __future__ import unicode_literals
import sys
sys.path = sys.path[1:]
from .forms import ParameterForm, PeakTableForm, MixtureForm, PowerTableForm, PowerForm
from .models import NeuropowerModel
from .utils import get_url, get_neuropower_steps, get_db_entries, get_session_id, create_local_copy, get_neurovault_form
from neuropowercore import cluster, BUM, neuropowermodels
from django.http import HttpResponseRedirect
from plots import plotPower, plotModel
from django.shortcuts import render
from django.conf import settings
from scipy.stats import norm, t
from nilearn import masking
import nibabel as nib
import pandas as pd
import numpy as np
import tempfile
import uuid
import os
temp_dir = tempfile.gettempdir()
## MAIN PAGE TEMPLATE PAGES
def npFAQ(request):
return render(request,"neuropower/neuropowerFAQ.html",{})
def tutorial(request):
return render(request,"neuropower/tutorial.html",{})
def methods(request):
return render(request,"neuropower/methods.html",{})
### SESSION CONTROL
def end_session(request):
'''ends a session so the user can start a new one.'''
try:
request.session.flush()
except KeyError:
pass
return neuropowerstart(request)
### NEUROPOWER TEMPLATE PAGES
def neuropowerstart(request):
'''step 1: start'''
# Get the template/step status
sid = get_session_id(request)
template = "neuropower/neuropowerstart.html"
steps = get_neuropower_steps(template,sid)
context = {"steps":steps}
return render(request,template,context)
def neuropowerinput(request,neurovault_id=None,end_session=False):
'''step 2: input'''
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowerinput.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
# Initiate parameter form
parsform = ParameterForm(request.POST or None,
request.FILES or None,
instance=neuropowerdata,
default_url="URL to nifti image",
err="")
# Check if a message is passed
message = request.GET.get('message','')
context['message'] = message
# Check if redirect from neurovault
neurovault_id = request.GET.get('neurovault','')
if neurovault_id:
neurovault_data = get_neurovault_form(request,neurovault_id)
context['parsform'] = neurovault_data["parsform"]
if not neurovault_data["message"] == None:
context['message'] = neurovault_data["message"]
return render(request,template,context)
# Check if new user or if parameterform is invalid
if not request.method=="POST" or not parsform.is_valid():
context["parsform"] = parsform
return render(request,template,context)
else:
form = parsform.save(commit = False)
form.SID = sid
form.save()
# handle data: copy to local drive
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
# create folder to save map local
if not os.path.exists("/var/maps/"):
os.mkdir("/var/maps/")
# make local copies of map and mask
map_local = "/var/maps/"+sid+"_map"
mask_local = "/var/maps/"+sid+"_mask"
if not neuropowerdata.map_url == "":
map_url = neuropowerdata.map_url
else:
map_url = "https://"+settings.AWS_S3_CUSTOM_DOMAIN+str(neuropowerdata.spmfile.name)
map_local = create_local_copy(map_url,map_local)
if not neuropowerdata.maskfile == "":
mask_url = "https://"+settings.AWS_S3_CUSTOM_DOMAIN+str(neuropowerdata.maskfile.name)
mask_local = create_local_copy(mask_url,mask_local)
# save map locations to database
form = parsform.save(commit = False)
form.map_url = map_url
form.map_local = map_local
if not neuropowerdata.maskfile == "":
form.mask_url = mask_url
form.mask_local = mask_local
else:
form.mask_local = mask_local
form.save()
# perform some higher level cleaning
error = None
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
SPM = nib.load(neuropowerdata.map_local)
if len(SPM.shape)>3:
if not SPM.shape[3]==1 or len(SPM.shape)>4:
error = "shape"
# check if the IQR is realistic (= check whether these are Z- or T-values)
IQR = np.subtract(*np.percentile(SPM.get_data(),[75,25]))
if IQR > 20:
error = "median"
# save other parameters
form.DoF = neuropowerdata.Subj-1 if neuropowerdata.Samples==1 else neuropowerdata.Subj-2
form.ExcZ = float(neuropowerdata.Exc) if float(neuropowerdata.Exc)>1 else -norm.ppf(float(neuropowerdata.Exc))
# if mask does not exist: create
if not error == 'shape':
if neuropowerdata.maskfile == "":
mask = masking.compute_background_mask(SPM,border_size=2, opening=True)
nvox = np.sum(mask.get_data())
form.mask_local = neuropowerdata.mask_local+".nii.gz"
nib.save(mask,form.mask_local)
form.nvox = nvox
# if mask is given: check dimensions
else:
mask = nib.load(neuropowerdata.mask_local).get_data()
if SPM.get_data().shape != mask.shape:
error = "dim"
else:
form.nvox = np.sum(mask)
# throw error if detected
if error:
parsform = ParameterForm(request.POST or None,
request.FILES or None,
default_url="URL to nifti image",
err=error)
context["parsform"] = parsform
return render(request,template,context)
else:
form.step = 1
form.save()
return HttpResponseRedirect('../neuropowertable/')
def neuropowerviewer(request):
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowerviewer.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
# check for unauthorised page visit
link = get_db_entries(template,sid)
if not link == "":
return HttpResponseRedirect(link)
context["url"] = neuropowerdata.map_url
context["thr"] = neuropowerdata.Exc
context["viewer"] = "<div class='papaya' data-params='params'></div>"
return render(request,template,context)
def neuropowertable(request):
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowertable.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
link = get_db_entries(template,sid)
if not link == "":
return HttpResponseRedirect(link)
# Check if a message is passed
message = request.GET.get('message','')
context['message'] = message
# Initiate peak table
peakform = PeakTableForm(instance = neuropowerdata)
form = peakform.save(commit=False)
form.SID = sid
# Compute peaks
SPM = nib.load(neuropowerdata.map_local).get_data()
MASK = nib.load(neuropowerdata.mask_local).get_data()
if neuropowerdata.ZorT == 'T':
SPM = -norm.ppf(t.cdf(-SPM,df=float(neuropowerdata.DoF)))
peaks = cluster.PeakTable(SPM,float(neuropowerdata.ExcZ),MASK)
if len(peaks) < 30:
context["text"] = "There are too few peaks for a good estimation. Either the ROI is too small or the screening threshold is too high."
form.err = context["text"]
else:
pvalues = np.exp(-float(neuropowerdata.ExcZ)*(np.array(peaks.peak)-float(neuropowerdata.ExcZ)))
pvalues = [max(10**(-6),p) for p in pvalues]
peaks['pval'] = pvalues
form.peaktable = peaks
context["peaks"] = peaks.to_html(classes=["table table-striped"])
form.step = 2
form.save()
return render(request,template,context)
def neuropowermodel(request):
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowermodel.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
link = get_db_entries(template,sid)
if not link == "":
return HttpResponseRedirect(link)
peaks = neuropowerdata.peaktable
# Check if a message is passed
message = request.GET.get('message','')
context['message'] = message
# Estimate pi1
bum = BUM.EstimatePi1(peaks.pval.tolist(),starts=20) # :)
if bum['pi1']<0.1:
context['message']=message+"\nWARNING: The estimates prevalence of activation is very low. The estimation procedure gets rather unstable in this case. Proceed with caution."
if bum['pi1']==0:
context['message']=message+"\n The estimated prevalence of activation is zero, which means our model can't find evidence that there is non-null activation in this contrast. As such, a power analysis will not be possible..."
# Estimate mixture model
modelfit = neuropowermodels.modelfit(peaks.peak.tolist(),
bum['pi1'],
exc = float(neuropowerdata.ExcZ),
starts=20,
method="RFT")
# Save estimates to form
mixtureform = MixtureForm(instance = neuropowerdata)
form = mixtureform.save(commit=False)
form.SID = sid
form.pi1 = bum['pi1']
form.a = bum['a']
if bum['pi1']>0:
form.mu = modelfit['mu']
form.sigma = modelfit['sigma']
form.step = 3
else:
form.mu = 0
form.sigma = 0
form.save()
# context["link"] = plotModel(sid)
return render(request,template,context)
def neuropowersamplesize(request):
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowersamplesize.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
link = get_db_entries(template,sid)
if not link == "":
return HttpResponseRedirect(link)
# Check if a message is passed
message = request.GET.get('message','')
context['message'] = message
# Load model data
context['texttop'] = "Hover over the lines to see detailed power predictions"
if not neuropowerdata.err == "":
context["text"] = neuropowerdata.err
return render(request,template,context)
peaks = neuropowerdata.peaktable
powerinputform = PowerForm(request.POST or None,instance=neuropowerdata)
if neuropowerdata.mu==0:
context['message']=message+"\n Our model can't find evidence that there is non-null activation in this contrast. As such, a power analysis will not be possible..."
else:
context["plot"] = True
if neuropowerdata.pi1<0.1:
context['message']=message+"\nWARNING: The estimates prevalence of activation is very low. The estimation procedure gets rather unstable in this case. Proceed with caution."
# Estimate smoothness
if neuropowerdata.SmoothEst==1:
#Manual
FWHM = np.array([float(neuropowerdata.Smoothx),float(neuropowerdata.Smoothy),float(neuropowerdata.Smoothz)])
voxsize = np.array([float(neuropowerdata.Voxx),float(neuropowerdata.Voxy),float(neuropowerdata.Voxz)])
elif neuropowerdata.SmoothEst==2:
# Estimate from data
cmd_smooth = "smoothest -V -z "+neuropowerdata.map_local+" -m "+neuropowerdata.mask_local
tmp = os.popen(cmd_smooth).read()
FWHM = np.array([float(x[8:15]) for x in tmp.split("\n")[16].split(",")])
voxsize=np.array([1,1,1])
# Compute thresholds and standardised effect size
thresholds = neuropowermodels.threshold(peaks.peak,peaks.pval,FWHM=FWHM,voxsize=voxsize,nvox=float(neuropowerdata.nvox),alpha=float(neuropowerdata.alpha),exc=float(neuropowerdata.ExcZ))
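        # Standardised (Cohen's d style) effect size: the estimated mu
        # divided by sqrt(n); it is scaled back up by sqrt(s) for each
        # candidate sample size in the loop below.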
effect_cohen = float(neuropowerdata.mu)/np.sqrt(float(neuropowerdata.Subj))
# Compute predicted power
power_predicted = []
newsubs = range(neuropowerdata.Subj,neuropowerdata.Subj+600)
for s in newsubs:
projected_effect = float(effect_cohen)*np.sqrt(float(s))
powerpred = {k:1-neuropowermodels.altCDF([v],projected_effect,float(neuropowerdata.sigma),exc=float(neuropowerdata.ExcZ),method="RFT")[0] for k,v in thresholds.items() if not v == 'nan'}
power_predicted.append(powerpred)
# Check if there are thresholds (mainly BH) missing
missing = [k for k,v in thresholds.items() if v == 'nan']
if len(missing) > 0:
context['MCPwarning']="There is not enough power to estimate a threshold for "+" and ".join(missing)+"."
# Save power calculation to table and model
powertable = pd.DataFrame(power_predicted)
powertable['newsamplesize']=newsubs
powertableform = PowerTableForm(instance=neuropowerdata)
savepowertableform = powertableform.save(commit=False)
savepowertableform.SID = sid
savepowertableform.data = powertable
savepowertableform.step = 4
savepowertableform.save()
context['plothtml'] = plotPower(sid)['code']
# Adjust plot with specific power or sample size question
if request.method == "POST":
if powerinputform.is_valid():
savepowerinputform = powerinputform.save(commit=False)
savepowerinputform.SID = sid
savepowerinputform.save()
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
pow = float(neuropowerdata.reqPow)
ss = neuropowerdata.reqSS
plotpower = plotPower(sid,neuropowerdata.MCP,pow,ss)
context['plothtml'] = plotpower['code']
context["textbottom"] = plotpower['text']
context["powerinputform"] = powerinputform
return render(request,template,context)
def neuropowercrosstab(request):
# Create the session id for the user
sid = get_session_id(request)
# get DB entry for sid
try:
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
except NeuropowerModel.DoesNotExist:
neuropowerdata = None
# Get the template/step status
template = "neuropower/neuropowercrosstab.html"
context = {}
steps = get_neuropower_steps(template,sid)
context["steps"] = steps
link = get_db_entries(template,sid)
if not link == "":
return HttpResponseRedirect(link)
# Check if a message is passed
message = request.GET.get('message','')
context['message'] = message
if neuropowerdata.mu==0:
context['message']="\n Our model can't find evidence that there is non-null activation. As such, a power analysis will not be possible..."
else:
if neuropowerdata.pi1<0.1:
context['message']="\nWARNING: The estimates prevalence of activation is very low. The estimation procedure gets rather unstable in this case. Proceed with caution."
# Load model data
if not neuropowerdata.err == "":
context["text"] = peakdata.err
return render(request,template,context)
# Restyle power table for export
names = neuropowerdata.data.columns.tolist()[:-1]
names.insert(0,'newsamplesize')
powertable = neuropowerdata.data[names].round(decimals=2)
repldict = {'BF':'Bonferroni','BH':'Benjamini-Hochberg','UN':'Uncorrected','RFT':'Random Field Theory','newsamplesize':'Samplesize'}
for word, initial in repldict.items():
names=[i.replace(word,initial) for i in names]
powertable.columns=names
context["power"] = powertable.to_html(index=False,col_space='120px',classes=["table table-striped"])
return render(request,template,context)
|
neuropower/neuropower
|
neuropower/apps/neuropowertoolbox/views.py
|
Python
|
mit
| 17,244
|
[
"VisIt"
] |
b29ba2dbf5114fee8435441aed09eba4fa9decfbd0bb16aebd614591ebb2330c
|
from __future__ import unicode_literals
import sys
import spotipy
from pytify.history import history
from spotipy.oauth2 import SpotifyClientCredentials
# Fetch songs with spotify api
class Pytifylib:
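    # NOTE: the attributes below are defined at class level, so they are
    # shared across all instances of Pytifylib.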
# hold songs
_songs = {}
# limit output songs
_limit = 15
def _spotify(self):
return self.getCredentials()
def getCredentials(self):
try:
return spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
except spotipy.oauth2.SpotifyOauthError:
print('Did not find Spotify credentials.')
print('Please visit https://github.com/bjarneo/pytify#credentials for more information.')
sys.exit(1)
# query
def query(self, query):
try:
data = self.search(query)
self.set_songs(data=data)
return True
except Exception as e:
print(e)
return False
# Search for song / artist
def search(self, query, type='artist,track'):
try:
response = self._spotify().search(q='+'.join(query.split()), type=type, limit=self._limit)
except spotipy.client.SpotifyException:
print('Search went wrong? Please try again.')
return False
return response
def set_songs(self, data):
for index, song in enumerate(data['tracks']['items']):
artist_name = song['artists'][0]['name'][:25]
song_name = song['name'][:30]
album_name = song['album']['name'][:30]
self._songs[index + 1] = {
'href': song['uri'],
'artist': artist_name,
'song': song_name,
'album': album_name
}
def get_songs(self):
return self._songs
# List all. Limit if needed
def list(self):
list = []
space = '{0:3} | {1:25} | {2:30} | {3:30}'
list.append(space.format('#', 'Artist', 'Song', 'Album'))
# Just to make it pwitty
list.append(space.format(
'-' * 3,
'-' * 25,
'-' * 30,
'-' * 30
))
for key, song in self.get_songs().items():
list.append(space.format(
'%d.' % key,
'%s' % song['artist'],
'%s' % song['song'],
'%s' % song['album']
))
return list
def _get_song_uri_at_index(self, index):
return str(self._songs[index]['href'])
def _get_song_name_at_index(self, index):
return str(
'%s - %s' % (self._songs[index]['artist'],
self._songs[index]['song'])
)
def print_history(self):
print('\nLast ten entries from history:')
entries = history().load_history_strings()
entries = list(entries)
qty = len(entries)
for entry in entries[qty-10:qty]:
print(entry)
def listen(self, index):
raise NotImplementedError()
def next(self):
raise NotImplementedError()
def prev(self):
raise NotImplementedError()
def play_pause(self):
raise NotImplementedError()
def pause(self):
raise NotImplementedError()
def get_current_playing(self):
return ''
|
bjarneo/Pytify
|
pytify/pytifylib.py
|
Python
|
mit
| 3,312
|
[
"VisIt"
] |
ad93200e7995177c5290cf771e3637e5a208dd30020000780bf98212e0c723ee
|
from __future__ import unicode_literals
import re
import random
import string
from unittest.mock import patch
from datetime import datetime
# from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth.models import AnonymousUser, User
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django_comments.views import comments
from django_comments_xtd import django_comments, signals, signed, views
from django_comments_xtd.conf import settings
from django_comments_xtd.models import XtdComment
from django_comments_xtd.tests.models import Article, Diary
request_factory = RequestFactory()
def post_article_comment(data, article, auth_user=None):
request = request_factory.post(
reverse('article-detail', kwargs={'year': article.publish.year,
'month': article.publish.month,
'day': article.publish.day,
'slug': article.slug}),
data=data, follow=True)
if auth_user:
request.user = auth_user
else:
request.user = AnonymousUser()
request._dont_enforce_csrf_checks = True
return comments.post_comment(request)
def post_diary_comment(data, diary_entry, auth_user=None):
request = request_factory.post(
reverse('diary-detail', kwargs={'year': diary_entry.publish.year,
'month': diary_entry.publish.month,
'day': diary_entry.publish.day}),
data=data, follow=True)
if auth_user:
request.user = auth_user
else:
request.user = AnonymousUser()
request._dont_enforce_csrf_checks = True
return comments.post_comment(request)
def confirm_comment_url(key, follow=True):
request = request_factory.get(reverse("comments-xtd-confirm",
kwargs={'key': key}),
follow=follow)
request.user = AnonymousUser()
return views.confirm(request, key)
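# Option overrides applied with patch.multiple in the tests below: they
# restrict comment posting on the tests.article model to authenticated users.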
app_model_options_mock = {
'tests.article': {
'who_can_post': 'users'
}
}
class OnCommentWasPostedTestCase(TestCase):
def setUp(self):
        patcher = patch('django_comments_xtd.views.send_mail')
        self.mock_mailer = patcher.start()
        self.addCleanup(patcher.stop)
self.article = Article.objects.create(
title="October", slug="october", body="What I did on October...")
self.form = django_comments.get_form()(self.article)
self.user = AnonymousUser()
def post_valid_data(self, auth_user=None, response_code=302):
data = {"name": "Bob", "email": "bob@example.com", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine..."}
data.update(self.form.initial)
response = post_article_comment(data, self.article, auth_user)
self.assertEqual(response.status_code, response_code)
if response.status_code == 302:
self.assertTrue(response.url.startswith('/comments/posted/?c='))
def test_post_as_authenticated_user(self):
self.user = User.objects.create_user("bob", "bob@example.com", "pwd")
self.assertTrue(self.mock_mailer.call_count == 0)
self.post_valid_data(auth_user=self.user)
# no confirmation email sent as user is authenticated
self.assertTrue(self.mock_mailer.call_count == 0)
def test_confirmation_email_is_sent(self):
self.assertTrue(self.mock_mailer.call_count == 0)
self.post_valid_data()
self.assertTrue(self.mock_mailer.call_count == 1)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_APP_MODEL_OPTIONS=app_model_options_mock)
def test_post_as_visitor_when_only_users_can_post(self):
self.assertTrue(self.mock_mailer.call_count == 0)
self.post_valid_data(response_code=400)
self.assertTrue(self.mock_mailer.call_count == 0)
class ConfirmCommentTestCase(TestCase):
def setUp(self):
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
# Create random string so that it's harder for zlib to compress
content = ''.join(random.choice(string.printable) for _ in range(6096))
self.article = Article.objects.create(title="September",
slug="september",
body="In September..." + content)
self.form = django_comments.get_form()(self.article)
data = {"name": "Bob", "email": "bob@example.com", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal iene kleine..."}
data.update(self.form.initial)
response = post_article_comment(data, self.article)
self.assertTrue(self.mock_mailer.call_count == 1)
self.key = str(
re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]
).group("key")
)
self.addCleanup(patcher.stop)
def test_confirm_url_is_short_enough(self):
# Tests that the length of the confirm url's length isn't
# dependent on the article length.
l = len(reverse("comments-xtd-confirm",
kwargs={'key': self.key}))
# print("\nXXXXXXXXXXX:", l)
self.assertLessEqual(l, 4096, "Urls can only be a max of 4096")
def test_400_on_bad_signature(self):
response = confirm_comment_url(self.key[:-1])
self.assertEqual(response.status_code, 400)
def test_consecutive_confirmation_url_visits_doesnt_fail(self):
# test that consecutives visits to the same confirmation URL produce
# an Http 404 code, as the comment has already been verified in the
# first visit
response = confirm_comment_url(self.key)
self.assertEqual(response.status_code, 302)
confirm_comment_url(self.key)
self.assertEqual(response.status_code, 302)
def test_signal_receiver_may_discard_the_comment(self):
# test that receivers of signal confirmation_received may return False
# and thus rendering a template_discarded output
def on_signal(sender, comment, request, **kwargs):
return False
self.assertEqual(self.mock_mailer.call_count, 1) # sent during setUp
signals.confirmation_received.connect(on_signal)
response = confirm_comment_url(self.key)
# mailing avoided by on_signal:
self.assertEqual(self.mock_mailer.call_count, 1)
self.assertTrue(response.content.find(b'Comment discarded') > -1)
def test_comment_is_created_and_view_redirect(self):
# testing that visiting a correct confirmation URL creates a XtdComment
# and redirects to the article detail page
Site.objects.get_current().domain = "testserver" # django bug #7743
response = confirm_comment_url(self.key, follow=False)
data = signed.loads(self.key, extra_key=settings.COMMENTS_XTD_SALT)
try:
comment = XtdComment.objects.get(
content_type=data["content_type"],
user_name=data["user_name"],
user_email=data["user_email"],
submit_date=data["submit_date"])
except:
comment = None
self.assertTrue(comment is not None)
self.assertEqual(response.url, comment.get_absolute_url())
def test_notify_comment_followers(self):
# send a couple of comments to the article with followup=True and check
# that when the second comment is confirmed a followup notification
# email is sent to the user who sent the first comment
self.assertEqual(self.mock_mailer.call_count, 1)
confirm_comment_url(self.key)
# no comment followers yet:
self.assertEqual(self.mock_mailer.call_count, 1)
# send 2nd comment
self.form = django_comments.get_form()(self.article)
data = {"name": "Alice", "email": "alice@example.com",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine..."}
data.update(self.form.initial)
response = post_article_comment(data, article=self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertEqual(self.mock_mailer.call_count, 2)
self.key = re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key")
confirm_comment_url(self.key)
self.assertEqual(self.mock_mailer.call_count, 3)
self.assertTrue(self.mock_mailer.call_args[0][3] == ["bob@example.com"])
self.assertTrue(self.mock_mailer.call_args[0][1].find(
"There is a new comment following up yours.") > -1)
def test_notify_followers_dupes(self):
# first of all confirm Bob's comment otherwise it doesn't reach DB
confirm_comment_url(self.key)
# then put in play pull-request-15's assert...
# https://github.com/danirus/django-comments-xtd/pull/15
diary = Diary.objects.create(
body='Lorem ipsum',
allow_comments=True
)
self.assertEqual(diary.pk, self.article.pk)
self.form = django_comments.get_form()(diary)
data = {"name": "Charlie", "email": "charlie@example.com",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine..."}
data.update(self.form.initial)
response = post_diary_comment(data, diary_entry=diary)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.key = str(re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key"))
# 1) confirmation for Bob (sent in `setUp()`)
# 2) confirmation for Charlie
self.assertEqual(self.mock_mailer.call_count, 2)
response = confirm_comment_url(self.key)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/cr/'))
self.assertEqual(self.mock_mailer.call_count, 2)
self.form = django_comments.get_form()(self.article)
data = {"name": "Alice", "email": "alice@example.com",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal iene kleine..."}
data.update(self.form.initial)
response = post_article_comment(data, article=self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertEqual(self.mock_mailer.call_count, 3)
self.key = re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key")
confirm_comment_url(self.key)
self.assertEqual(self.mock_mailer.call_count, 4)
self.assertTrue(self.mock_mailer.call_args[0][3] == ["bob@example.com"])
self.assertTrue(self.mock_mailer.call_args[0][1].find(
"There is a new comment following up yours.") > -1)
def test_no_notification_for_same_user_email(self):
# test that a follow-up user_email don't get a notification when
# sending another email to the thread
self.assertEqual(self.mock_mailer.call_count, 1)
confirm_comment_url(self.key) # confirm Bob's comment
# no comment followers yet:
self.assertEqual(self.mock_mailer.call_count, 1)
# send Bob's 2nd comment
self.form = django_comments.get_form()(self.article)
data = {"name": "Alice", "email": "bob@example.com", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment": "Bob's comment he shouldn't get notified about"}
data.update(self.form.initial)
response = post_article_comment(data, self.article)
self.assertEqual(self.mock_mailer.call_count, 2)
self.key = re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key")
confirm_comment_url(self.key)
self.assertEqual(self.mock_mailer.call_count, 2)
class ReplyNoCommentTestCase(TestCase):
def test_reply_non_existing_comment_raises_404(self):
response = self.client.get(reverse("comments-xtd-reply",
kwargs={"cid": 1}))
self.assertContains(response, "404", status_code=404)
class ReplyCommentTestCase(TestCase):
def setUp(self):
article = Article.objects.create(title="September",
slug="september",
body="What I did on September...")
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 1 to article, level 0
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="comment 1 to article",
submit_date=datetime.now())
# post Comment 2 to article, level 1
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="comment 1 to comment 1",
submit_date=datetime.now(),
parent_id=1)
# post Comment 3 to article, level 2 (max according to test settings)
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="comment 1 to comment 1",
submit_date=datetime.now(),
parent_id=2)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_MAX_THREAD_LEVEL=2)
def test_not_allow_threaded_reply_raises_403(self):
response = self.client.get(reverse("comments-xtd-reply",
kwargs={"cid": 3}))
self.assertEqual(response.status_code, 403)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_APP_MODEL_OPTIONS=app_model_options_mock)
def test_reply_as_visitor_when_only_users_can_post(self):
response = self.client.get(reverse("comments-xtd-reply",
kwargs={"cid": 1}))
self.assertEqual(response.status_code, 302) # Redirect to login.
self.assertTrue(response.url.startswith(settings.LOGIN_URL))
class MuteFollowUpTestCase(TestCase):
def setUp(self):
# Creates an article and send two comments to the article with
# follow-up notifications. First comment doesn't have to send any
# notification.
# Second comment has to send one notification (to Bob).
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="September", slug="september", body="John's September")
self.form = django_comments.get_form()(self.article)
# Bob sends 1st comment to the article with follow-up
data = {"name": "Bob", "email": "bob@example.com", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment": "Nice September you had..."}
data.update(self.form.initial)
response = post_article_comment(data, self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertTrue(self.mock_mailer.call_count == 1)
bobkey = str(re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key"))
confirm_comment_url(bobkey) # confirm Bob's comment
# Alice sends 2nd comment to the article with follow-up
data = {"name": "Alice", "email": "alice@example.com",
"followup": True, "reply_to": 1, "level": 1, "order": 1,
"comment": "Yeah, great photos"}
data.update(self.form.initial)
response = post_article_comment(data, self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertTrue(self.mock_mailer.call_count == 2)
alicekey = str(re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key"))
confirm_comment_url(alicekey) # confirm Alice's comment
# Bob receives a follow-up notification
self.assertTrue(self.mock_mailer.call_count == 3)
self.bobs_mutekey = str(re.search(
r'http://.+/mute/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]).group("key"))
self.addCleanup(patcher.stop)
def get_mute_followup_url(self, key):
request = request_factory.get(reverse("comments-xtd-mute",
kwargs={'key': key}),
follow=True)
request.user = AnonymousUser()
response = views.mute(request, key)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.content.find(b'Comment thread muted') > -1)
return response
def test_mute_followup_notifications(self):
# Bob receives a notification and clicks on the mute link to
# avoid further comment messages on the same article.
self.get_mute_followup_url(self.bobs_mutekey)
# Alice sends 3rd comment to the article with follow-up
data = {"name": "Alice", "email": "alice@example.com",
"followup": True, "reply_to": 2, "level": 1, "order": 1,
"comment": "And look at this and that..."}
data.update(self.form.initial)
response = post_article_comment(data, self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
# Alice confirms her comment...
self.assertTrue(self.mock_mailer.call_count == 4)
alicekey = str(
re.search(r'http://.+/confirm/(?P<key>[\S]+)/',
self.mock_mailer.call_args[0][1]
).group("key")
)
confirm_comment_url(alicekey) # confirm Alice's comment
# Alice confirmed her comment, but this time Bob won't receive any
# notification, and neither does Alice, being the sender.
self.assertTrue(self.mock_mailer.call_count == 4)
class HTMLDisabledMailTestCase(TestCase):
def setUp(self):
# Create an article and send a comment. Test methods will check headers
# to see whether messages have multiple parts or not.
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="September", slug="september", body="John's September")
self.form = django_comments.get_form()(self.article)
# Bob sends 1st comment to the article with follow-up
self.data = {"name": "Bob", "email": "bob@example.com",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Nice September you had..."}
self.data.update(self.form.initial)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_SEND_HTML_EMAIL=False)
def test_mail_does_not_contain_html_part(self):
with patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_SEND_HTML_EMAIL=False):
response = post_article_comment(self.data, self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertTrue(self.mock_mailer.call_count == 1)
self.assertTrue(self.mock_mailer.call_args[1]['html'] is None)
def test_mail_does_contain_html_part(self):
response = post_article_comment(self.data, self.article)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/comments/posted/?c='))
self.assertTrue(self.mock_mailer.call_count == 1)
self.assertTrue(self.mock_mailer.call_args[1]['html'] is not None)
|
danirus/django-comments-xtd
|
django_comments_xtd/tests/test_views.py
|
Python
|
bsd-2-clause
| 21,327
|
[
"VisIt"
] |
74a9220b5a8c9f983424c86d71633504adc279c700f1357e5b16fe2acda38271
|
"""
The Phi DSL is all about creating and combining functions in useful ways, enabling a declarative approach that can improve clarity and readability and lead to shorter code. It has two main functionalities:
1. The lambda capabilities, which let you quickly create readable functions.
2. The `Expression` combinator methods, which let you build up complex computations.
The DSL has very few rules, but it's important to know them:
* **Functions** : all functions of arity 1 are members of the DSL. Any object that defines `__call__` is accepted, but if its arity is not 1 there will be problems.
* **Values** : any value e.g. `val` is part of the DSL, but internally it will be compiled to the constant function `lambda x: val`.
* **Expressions** : all `Expression`s are elements of the DSL. See `phi.dsl.Expression`.
* **Containers** : the container types `list`, `tuple`, `set`, and `dict` are elements of the DSL and are translated into their counterparts `phi.dsl.Expression.List`, `phi.dsl.Expression.Tuple`, `phi.dsl.Expression.Set`, and `phi.dsl.Expression.Dict`.
Any expression can appear inside another expression in a nested fashion. The correct way to think about this is that each sub-expression will be compiled to a function of arity 1, therefore from the parent expression's point of view all of its elements are just functions.
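For example (a minimal sketch relying on the container rule above), a list literal can be nested directly inside `Seq`:
from phi import P, Seq
f = Seq(P + 1, [P * 2, P * 10])
assert f(1) == [4, 20] # [(1 + 1) * 2, (1 + 1) * 10]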
** Expressions **
`Expression` overrides all operators plus the `__getitem__` and `__call__` methods; this allows you to create functions by just writing formulas. For example
from phi import P
f = (P * 2) / (P + 1)
assert f(1) == 1 #( 1 * 2 ) / ( 1 + 1) == 2 / 2 == 1
the previous expression for `f` is equivalent to
lambda x: (x * 2) / (x + 1)
As you can see, it creates math-like functions that are very readable. The overloading mechanism has the following rules:
Let `g` be an Expression, `h` any expression of the DSL, and `$` any Python operator; then
f = g $ h
is equivalent to
lambda x: g(x) $ h(x)
*__getitem__*
The special method `__getitem__` is also implemented and enables you to define a lambda that uses Python's item-access mechanism on its argument. The expression
P[x]
is equivalent to
lambda obj: obj[x]
** Examples **
Add the first and last element of a list
from phi import P
f = P[0] + P[-1]
assert f([1, 2, 3, 4]) == 5 #1 + 4 == 5
** State **
You might see functions like `phi.dsl.Expression.Read` and `phi.dsl.Expression.Write` that make it look as if you are doing stateful voodoo behind the scenes; don't worry, internally `Expression` is implemented using a pattern that passes a state `dict` from lambda to lambda in a functional manner. All normal functions of the form
y = f(x)
are lifted to
(y, new_state) = f(x, state)
This way `phi.dsl.Expression.Read` and `phi.dsl.Expression.Write` can be implemented so that they read/write from/to the state being passed around; `Write` returns a new state with the updated values, so all operations are immutable. Since Expressions internally return a tuple with a value and a dict, you might wonder why you only get the value when you call an Expression; see `__call__` next.
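Conceptually the lifting above looks like this (a sketch; the module relies on its `utils.lift` helper for this):
def lift(f):
return lambda x, state: (f(x), state)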
** __call__ **
def __call__(self, x, *return_state, **state)
*Arguments*
* `x` : a value to apply the computation
* `*return_state` : an optional boolean that determines whether the resulting internal state should be returned, defaults to `False`
* `**state` : all keyword arguments are interpreted as initial values of the state `dict` that will be passed through the computation, defaults to `{}`.
Normally you call an `Expression` passing only the value
f = P + 1
f(1) == 2
however if you pass an extra argument with `True` you can get the state back
f = P + 1
f(1, True) == (2, {})
and if you pass keyword arguments you will see that the returned state includes them
f = P + 1
f(1, True, a=0) == (2, {"a": 0})
Naturally this behaviour is only useful if you include expressions that do something with the state, so let's do that
from phi import P, Read, Write, Seq
f = Seq(Read("a"), P + 5, Write("a"))
f(None, True, a=0) == (5, {"a": 5})
Here we pass `None` to `f` but also set `a = 0` internally, and then
1. `Read("a")` discards `None` and sets the value to `0`, which is the current value of `a`
2. `P + 5` adds `5` to `0`
3. `Write("a")` sets the value `a` of the state to `5`
The previous can also be written more compactly as
f = Read("a") + 5 >> Write("a")
f(None, True, a=0) == (5, {"a": 5})
or even
f = Read.a + 5 >> Write.a
assert f(None, True, a=0) == (5, {"a": 5})
** `>>` **
The operator `>>` is NOT a lambda for [bitwise right shift](https://www.tutorialspoint.com/python/bitwise_operators_example.htm); instead
f >> g
represents function composition in a sequential manner, such that the previous is equivalent to
lambda x: g(f(x))
See `phi.dsl.Expression.Seq`. As you can see, functions are executed in the order they appear, which makes code more readable and easier to reason about.
** << **
This operator composes functions according to the mathematical definition, that is
f << g
is equivalent to
lambda x: f(g(x))
*Composition Comparison*
* `f >> g` is equivalent to `lambda x: g(f(x))`. `f` is executed first then `g`. Reads left to right.
* `f << g` is equivalent to `lambda x: f(g(x))`. `g` is executed first then `f`. Reads right to left.
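A quick sketch showing both directions:
from phi import P
f = (P + 1) >> (P * 2) # (x + 1) * 2
g = (P + 1) << (P * 2) # (x * 2) + 1
assert f(3) == 8
assert g(3) == 7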
** fn.py **
The operator overloading mechanism `Expression` uses to create quick functions takes much of its inspiration and some code from [fn.py](https://github.com/fnpy/fn.py)'s '`_`' object; however, it differs in that it only creates functions of arity 1 to comply with the DSL. Whereas in fn.py expressions like
_ + _
are equivalent to
lambda a, b: a + b
That is, every time `_` appears in a compound expression it creates a function of a higher arity. In phi, instead, the expression
P + P
is interpreted as
lambda a: a + a
In the context of the DSL this is more useful since it allows you to write expressions like
f = P.map(P ** 2) >> list >> P[0] + P[1] >> math.sqrt #f = lambda x: math.sqrt( x[0] ** 2 + x[1] ** 2)
assert f([3, 4]) == 5
where `P[0] + P[1]` creates the single-argument lambda `lambda x: x[0] + x[1]`, which fits nicely with the function composition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .utils import identity
from . import utils
from abc import ABCMeta, abstractmethod
from inspect import isclass
import functools
import operator
###############################
# Expression Helpers
###############################
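# _fmap lifts a binary operator `opt` into an Expression method: the resulting
# expression applies self and the parsed `other` to the same input, combines the
# two outputs with `opt`, and merges the two resulting states.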
def _fmap(opt):
def method(self, other):
f = self._f
g = _parse(other)._f
def h(x, state):
y1, state1 = f(x, state)
y2, state2 = g(x, state)
y_out = opt(y1, y2)
state_out = utils.merge(state1, state2)
return y_out, state_out
return self.__unit__(h)
return method
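# _fmap_flip is the reflected variant used for the __r*__ operators: `other` is
# evaluated first, the state is threaded sequentially, and the operands are
# passed to `opt` in flipped order.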
def _fmap_flip(opt):
def method(self, other):
f = self._f
g = _parse(other)._f
def h(x, state):
y2, state = g(x, state)
y1, state = f(x, state)
y_out = opt(y2, y1)
return y_out, state
return self.__unit__(h)
return method
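# _unary_fmap lifts a unary operator `opt` into a method by composing the lifted
# operator after the current expression.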
def _unary_fmap(opt):
def method(self):
return self.__then__(utils.lift(opt))
return method
###############################
# Helpers
###############################
class _RefProxy(object):
"""docstring for _ReadProxy."""
def __getattr__(self, name):
return _StateContextManager.REFS[name]
def __getitem__(self, name):
return _StateContextManager.REFS[name]
def __call__(self, *args, **kwargs):
return Ref(*args, **kwargs)
_RefProxyInstance = _RefProxy()
class _StateContextManager(object):
REFS = None
def __init__(self, next_refs):
self.previous_refs = _StateContextManager.REFS
self.next_refs = next_refs
def __enter__(self):
_StateContextManager.REFS = self.next_refs
def __exit__(self, *args):
_StateContextManager.REFS = self.previous_refs
class Ref(object):
"""
Returns an object that helps you to immediately create and [read](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Read) [references](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Ref).
**Creating References**
You can manually create a [Ref](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Ref) outside the DSL using `Ref` and then pass it to a `phi.dsl.Expression.Read` or `phi.dsl.Expression.Write` expression. Here is a contrived example
from phi import P, Ref
r = Ref('r')
assert [600, 3, 6] == P.Pipe(
2,
P + 1, {'a'}, # a = 2 + 1 = 3
P * 2, {'b'}, # b = 3 * 2 = 6
P * 100, {'c', r }, # c = r = 6 * 100 = 600
['c', 'a', 'b']
)
assert r() == 600
**Reading References from the Current Context**
While the expression `Read.a` will return a function that discards its argument and returns the value of the reference `a` in the current context, the expression `Ref.x` will return the value of `x` immediately; this is useful inside Python lambdas.
Read.x(None) <=> Ref.x
As an example
from phi import P, Obj, Ref
assert {'a': 97, 'b': 98, 'c': 99} == P.Pipe(
"a b c", Obj
.split(' ').Write.keys # keys = ['a', 'b', 'c']
.map(ord), # [ord('a'), ord('b'), ord('c')] == [97, 98, 99]
lambda it: zip(Ref.keys, it), # [('a', 97), ('b', 98), ('c', 99)]
dict # {'a': 97, 'b': 98, 'c': 99}
)
"""
def __init__(self, name, value=utils.NO_VALUE):
super(Ref, self).__init__()
self.name = name
"""
The reference name. Can be thought of as a key in a dictionary.
"""
self.value = value
"""
The value of the reference. Can be thought of as a value in a dictionary.
"""
def __call__(self, *optional):
"""
Returns the value of the reference. Any number of arguments can be passed; they will all be ignored.
"""
if self.value is utils.NO_VALUE:
raise Exception("Trying to read Ref('{0}') before assignment".format(self.name))
return self.value
def write(self, x):
"""
Sets the value of the reference to the input argument `x`. It's also an identity function since it returns `x`.
"""
self.value = x
return x
class _ReadProxy(object):
"""docstring for _ReadProxy."""
def __init__(self, __builder__):
self.__builder__ = __builder__
def __getattr__(self, name):
return self.__do__(name)
def __call__ (self, name):
return self.__do__(name)
def __do__(self, name):
g = lambda z, state: (state[name], state)
return self.__builder__.__then__(g)
class _ObjectProxy(object):
"""docstring for Underscore."""
def __init__(self, __builder__):
self.__builder__ = __builder__
def __getattr__(self, name):
def method_proxy(*args, **kwargs):
f = lambda x: getattr(x, name)(*args, **kwargs)
return self.__builder__.__then__(utils.lift(f))
return method_proxy
class _RecordProxy(object):
"""docstring for _RecordProxy."""
def __init__(self, __builder__):
self.__builder__ = __builder__
def __call__(self, attr):
f = utils.lift(lambda x: getattr(x, attr))
return self.__builder__.__then__(f)
def __getattr__ (self, attr):
f = utils.lift(lambda x: getattr(x, attr))
return self.__builder__.__then__(f)
class _RecordObject(dict):
"""docstring for DictObject."""
def __init__(self,*arg,**kw):
super(_RecordObject, self).__init__(*arg, **kw)
def __getattr__ (self, attr):
return self[attr]
class _WithContextManager(object):
WITH_GLOBAL_CONTEXT = utils.NO_VALUE
def __init__(self, new_scope):
self.new_scope = new_scope
self.old_scope = _WithContextManager.WITH_GLOBAL_CONTEXT
def __enter__(self):
_WithContextManager.WITH_GLOBAL_CONTEXT = self.new_scope
def __exit__(self, *args):
_WithContextManager.WITH_GLOBAL_CONTEXT = self.old_scope
###############################
# DSL Elements
###############################
class Expression(object):
"""
All elements of this language are callables (implement `__call__`) of arity 1.
** Examples **
Compiling a function just returns back the function
Seq(f) == f
and piping through a function is just the same as applying the function
Pipe(x, f) == f(x)
"""
def __init__(self, f=utils.state_identity):
self._f = f
def __unit__(self, f, _return_type=None):
"Monadic unit, also known as `return`"
if _return_type:
return _return_type(f)
else:
return self.__class__(f)
def __then__(self, other, **kwargs):
f = self._f
g = other
h = lambda x, state: g(*f(x, state))
return self.__unit__(h, **kwargs)
def __call__(self, __x__, *__return_state__, **state):
x = __x__
return_state = __return_state__
if len(return_state) == 1 and type(return_state[0]) is not bool:
raise Exception("Invalid return state condition, got {return_state}".format(return_state=return_state))
with _StateContextManager(state):
y, next_state = self._f(x, state)
return (y, next_state) if len(return_state) >= 1 and return_state[0] else y
def __hash__(self):
return hash(self._f)
def F(self, expr):
return self >> expr
def Pipe(self, *sequence, **kwargs):
"""
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.
**Arguments**
* ***sequence**: any number of expressions. All expressions inside `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.
The expression
Pipe(*sequence, **kwargs)
is equivalent to
Seq(*sequence, **kwargs)(None)
Normally the first argument of `Pipe` is a value that is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.
**Examples**
from phi import P
def add1(x): return x + 1
def mul3(x): return x * 3
x = P.Pipe(
1, #input
add1, #1 + 1 == 2
mul3 #2 * 3 == 6
)
assert x == 6
The previous using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions
from phi import P
x = P.Pipe(
1, #input
P + 1, #1 + 1 == 2
P * 3 #2 * 3 == 6
)
assert x == 6
**Also see**
* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
"""
state = kwargs.pop("refs", {})
return self.Seq(*sequence, **kwargs)(None, **state)
def ThenAt(self, n, f, *_args, **kwargs):
"""
`ThenAt` enables you to partially apply many arguments to a function; the returned partial expects a single argument, which will be applied at the `n`th position of the original function.
**Arguments**
* **n**: position at which the created partial will apply its awaited argument on the original function.
* **f**: the function from which the partial will be created.
* **_args & kwargs**: all `*_args` and `**kwargs` will be passed to the function `f`.
* `_return_type = None`: type of the returned `builder`, if `None` it will return the same type of the current `builder`. This special kwarg will NOT be passed to `f`.
You can think of `n` as the argument position at which the piped value will be passed into `f`. Say you have the following expression
D == fun(A, B, C)
all the following are equivalent
from phi import P, Pipe, ThenAt
D == Pipe(A, ThenAt(1, fun, B, C))
D == Pipe(B, ThenAt(2, fun, A, C))
D == Pipe(C, ThenAt(3, fun, A, B))
you could also use the shortcuts `Then`, `Then2`,..., `Then5`, which are more readable
from phi import P, Pipe
D == Pipe(A, P.Then(fun, B, C))
D == Pipe(B, P.Then2(fun, A, C))
D == Pipe(C, P.Then3(fun, A, B))
There is a special case not discussed above: `n = 0`. When this happens only the given arguments are applied to `f`; the method returns a partial that expects a single argument but completely ignores it
from phi import P
D == Pipe(None, P.ThenAt(0, fun, A, B, C))
D == Pipe(None, P.Then0(fun, A, B, C))
**Examples**
Max of 6 and the argument:
from phi import P
assert 6 == P.Pipe(
2,
P.Then(max, 6)
)
Previous is equivalent to
assert 6 == max(2, 6)
Open a file in read mode (`'r'`)
from phi import P
f = P.Pipe(
"file.txt",
P.Then(open, 'r')
)
Previous is equivalent to
f = open("file.txt", 'r')
Split a string by whitespace and then get the length of each word
from phi import P
assert [5, 5, 5] == P.Pipe(
"Again hello world",
P.Then(str.split, ' ')
.Then2(map, len)
)
Previous is equivalent to
x = "Again hello world"
x = str.split(x, ' ')
x = map(len, x)
assert [5, 5, 5] == x
As you can see, `Then2` was very useful because `map` accepts an `iterable` as its `2nd` parameter. You can rewrite the previous using the [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html) and the `phi.builder.Builder.Obj` object
from phi import P, Obj
assert [5, 5, 5] == P.Pipe(
"Again hello world",
Obj.split(' '),
P.map(len)
)
**Also see**
* `phi.builder.Builder.Obj`
* [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html)
* `phi.builder.Builder.RegisterAt`
"""
_return_type = None
n_args = n - 1
if '_return_type' in kwargs:
_return_type = kwargs['_return_type']
del kwargs['_return_type']
@utils.lift
def g(x):
new_args = _args[0:n_args] + (x,) + _args[n_args:] if n_args >= 0 else _args
return f(*new_args, **kwargs)
return self.__then__(g, _return_type=_return_type)
def Then0(self, f, *args, **kwargs):
"""
`Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(0, f, *args, **kwargs)
def Then(self, f, *args, **kwargs):
"""
`Then(f, ...)` is equivalent to `ThenAt(1, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(1, f, *args, **kwargs)
Then1 = Then
def Then2(self, f, arg1, *args, **kwargs):
"""
`Then2(f, ...)` is equivalent to `ThenAt(2, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1,) + args
return self.ThenAt(2, f, *args, **kwargs)
def Then3(self, f, arg1, arg2, *args, **kwargs):
"""
`Then3(f, ...)` is equivalent to `ThenAt(3, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2) + args
return self.ThenAt(3, f, *args, **kwargs)
def Then4(self, f, arg1, arg2, arg3, *args, **kwargs):
"""
`Then4(f, ...)` is equivalent to `ThenAt(4, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3) + args
return self.ThenAt(4, f, *args, **kwargs)
def Then5(self, f, arg1, arg2, arg3, arg4, *args, **kwargs):
"""
`Then5(f, ...)` is equivalent to `ThenAt(5, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3, arg4) + args
return self.ThenAt(5, f, *args, **kwargs)
def List(self, *branches, **kwargs):
"""
While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal is the most common incarnation of this expression, it can actually be any iterable (implementing `__iter__`) that is not a tuple and that yields valid expressions.
The expression
k = List(f, g)
is equivalent to
k = lambda x: [ f(x), g(x) ]
In general, the following rules apply after compilation:
**General Branching**
List(f0, f1, ..., fn)
is equivalent to
lambda x: [ f0(x), f1(x), ..., fn(x) ]
**Composing & Branching**
It is interesting to see how branching interacts with composing. The expression
Seq(f, List(g, h))
is *almost* equivalent to
List( Seq(f, g), Seq(f, h) )
As you can see, it's as if `f` were distributed over the List. We say *almost* because their implementations are different
def _lambda(x):
x = f(x)
return [ g(x), h(x) ]
vs
lambda x: [ g(f(x)), h(f(x)) ]
As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.
### Examples
from phi import P, List
avg_word_length = P.Pipe(
"1 22 333",
lambda s: s.split(' '), # ['1', '22', '333']
lambda l: map(len, l), # [1, 2, 3]
List(
sum # 1 + 2 + 3 == 6
,
len # len([1, 2, 3]) == 3
),
lambda l: l[0] / l[1] # sum / len == 6 / 3 == 2
)
assert avg_word_length == 2
The previous could also be done more briefly like this
from phi import P, Obj, List
avg_word_length = P.Pipe(
"1 22 333", Obj
.split(' ') # ['1', '22', '333']
.map(len) # [1, 2, 3]
.List(
sum #sum([1, 2, 3]) == 6
,
len #len([1, 2, 3]) == 3
),
P[0] / P[1] #6 / 3 == 2
)
assert avg_word_length == 2
In the example above the last expression
P[0] / P[1]
works for a couple of reasons
1. The previous expression returns a list
2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression
(P * 2) / (P + 1)
compile to a function of the form
lambda x: (x * 2) / (x + 1)
Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html).
"""
gs = [ _parse(code)._f for code in branches ]
def h(x, state):
ys = []
for g in gs:
y, state = g(x, state)
ys.append(y)
return (ys, state)
return self.__then__(h, **kwargs)
def Tuple(self, *expressions, **kwargs):
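"""Same as `phi.dsl.Expression.List` but the resulting list is converted to a `tuple`."""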
return self.List(*expressions) >> tuple
def Set(self, *expressions, **kwargs):
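"""Same as `phi.dsl.Expression.List` but the resulting list is converted to a `set`."""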
return self.List(*expressions) >> set
def Seq(self, *sequence, **kwargs):
"""
`Seq` is used to express function composition. The expression
Seq(f, g)
is equivalent to
lambda x: g(f(x))
As you can see, it's a little different from the mathematical definition. Execution order flows from left to right, which makes reading and reasoning about code much easier. This behaviour is based upon the `|>` (pipe) operator found in languages like F#, Elixir and Elm. You can pack as many expressions as you like and they will be applied in order to the data that is passed through them when compiled and executed.
In general, the following rules apply for Seq:
**General Sequence**
Seq(f0, f1, ..., fn-1, fn)
is equivalent to
lambda x: fn(fn-1(...(f1(f0(x)))))
**Single Function**
Seq(f)
is equivalent to
f
**Identity**
The empty Seq
Seq()
is equivalent to
lambda x: x
### Examples
from phi import P, Seq
f = Seq(
P * 2,
P + 1,
P ** 2
)
assert f(1) == 9 # ((1 * 2) + 1) ** 2
The previous example using `P.Pipe`
from phi import P
assert 9 == P.Pipe(
1,
P * 2, #1 * 2 == 2
P + 1, #2 + 1 == 3
P ** 2 #3 ** 2 == 9
)
"""
fs = [ _parse(elem)._f for elem in sequence ]
def g(x, state):
return functools.reduce(lambda args, f: f(*args), fs, (x, state))
return self.__then__(g, **kwargs)
def Dict(self, **branches):
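"""Same branching idea as `phi.dsl.Expression.List` but with named branches: each keyword expression becomes a key of the returned record, which supports both `d["key"]` and `d.key` access. See `Rec`."""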
gs = { key : _parse(value)._f for key, value in branches.items() }
def h(x, state):
ys = {}
for key, g in gs.items():
y, state = g(x, state)
ys[key] = y
return _RecordObject(**ys), state
return self.__then__(h)
@property
def Rec(self):
"""
`phi.dsl.Expression.List` provides a way to branch the computation as a list, but the values of each branch are then accessed by index, which can be inconvenient and reduces readability. `Rec` branches provide a way to create named branches via `Rec(**kwargs)`, where the keys are the names of the branches and the values are valid expressions representing the computation of that branch.
A special object is returned by this expression when executed. This object derives from `dict` and fully emulates it, so you can treat it as such; however, it also implements the `__getattr__` method, which lets you access a value as if it were a field.
### Examples
from phi import P, Rec
stats = P.Pipe(
[1,2,3],
Rec(
sum = sum
,
len = len
)
)
assert stats.sum == 6
assert stats.len == 3
assert stats['sum'] == 6
assert stats['len'] == 3
Now let's imagine that we want to find the average value of the list. We could calculate it outside of the pipe doing something like `avg = stats.sum / stats.len`; however, we could also do it inside the pipe using `Rec` field-access lambdas
from phi import P, Rec
avg = P.Pipe(
[1,2,3],
Rec(
sum = sum #6
,
len = len #3
),
Rec.sum / Rec.len #6 / 3 == 2
)
assert avg == 2
"""
return _RecordProxy(self)
def With(self, context_manager, *body, **kwargs):
"""
**With**
def With(context_manager, *body):
**Arguments**
* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple, so all expressions contained are composed.
As with normal Python programs, you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement; in Phi you use `P.With` or `phi.With`
**Context**
Python's `with` statement returns a context object through the `as` keyword; in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.
### Examples
from phi import P, Obj, Context, With, Pipe
text = Pipe(
"text.txt",
With( open, Context,
Obj.read()
)
)
The previous is equivalent to
with open("text.txt") as f:
text = f.read()
"""
context_f = _parse(context_manager)._f
body_f = E.Seq(*body)._f
def g(x, state):
context, state = context_f(x, state)
with context as scope:
with _WithContextManager(scope):
return body_f(x, state)
return self.__then__(g, **kwargs)
@property
def Read(self):
"""
Giving names to and saving parts of your computation to use later is useful, to say the least. In Phi the expression
Write(x = expr)
creates a reference `x` given the value of `expr`, which you can read later. To read the previous you would use any of the following expressions
Read('x')
Read.x
### Example
Let's see a common situation where you would use this
from phi import P, List, Seq, Read, Write
result = P.Pipe(
input,
Write(ref = f1),
f2,
List(
f3
,
Seq(
Read('ref'),
f4
)
)
)
Here you *save* the value output by `f1` and then load it as the initial value of the second branch. In normal Python the previous would be *almost* equivalent to
x = f1(input)
ref = x
x = f2(x)
result = [
f3(x)
,
f4(ref)
]
"""
return _ReadProxy(self)
def ReadList(self, *branches, **kwargs):
"""
Same as `phi.dsl.Expression.List` but any string argument `x` is translated to `Read(x)`.
"""
branches = map(lambda x: E.Read(x) if isinstance(x, str) else x, branches)
return self.List(*branches, **kwargs)
def Write(self, *state_args, **state_dict):
"""See `phi.dsl.Expression.Read`"""
if len(state_dict) + len(state_args) < 1:
raise Exception("Please include at-least 1 state variable, got {0} and {1}".format(state_args, state_dict))
if len(state_dict) > 1:
raise Exception("Please include at-most 1 keyword argument expression, got {0}".format(state_dict))
if len(state_dict) > 0:
state_key = next(iter(state_dict.keys()))
write_expr = state_dict[state_key]
state_args += (state_key,)
expr = self >> write_expr
else:
expr = self
def g(x, state):
update = { key: x for key in state_args }
state = utils.merge(state, update)
#side effect for convenience
_StateContextManager.REFS.update(state)
return x, state
return expr.__then__(g)
@property
def Rec(self):
"""
`Rec` is a `property` that returns an object that defines the `__getattr__` and `__getitem__` methods, which when called help you create lambdas that emulate field access. The following expression
Rec.some_field
is equivalent to
lambda rec: rec.some_field
**Examples**
from phi import P, Obj, Rec
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def flip_cords(self):
y = self.y
self.y = self.x
self.x = y
assert 4 == P.Pipe(
Point(1, 2), # point(x=1, y=2)
Obj.flip_cords(), # point(x=2, y=1)
Rec.x, # point.x = 2
P * 2 # 2 * 2 = 4
)
**Also see**
* `phi.builder.Builder.Obj`
* `phi.builder.Builder.Read`
* `phi.builder.Builder.Write`
"""
return _RecordProxy(self)
@property
def Obj(self):
"""
`Obj` is a `property` that returns an object that defines the `__getattr__` method which when called helps you create a partial that emulates a method call. The following expression
Obj.some_method(x1, x2, ...)
is equivalent to
lambda obj: obj.some_method(x1, x2, ...)
**Examples**
from phi import P, Obj
assert "hello world" == P.Pipe(
" HELLO HELLO {0} ",
Obj.format("WORLD"), # " HELLO HELLO WORLD "
Obj.strip(), # "HELLO HELLO WORLD"
Obj.lower() # "hello hello world"
Obj.split(' ') # ["hello", "hello", "world"]
Obj.count("hello") # 2
)
**Also see**
* `phi.builder.Builder.Rec`
* [dsl.Write](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Write)
* `phi.builder.Builder.Write`
"""
return _ObjectProxy(self)
@property
def Ref(self):
"""
Returns an object that helps you to immediately create and [read](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Read) [references](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Ref).
**Creating References**
You can manually create a [Ref](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Ref) outside the DSL using `Ref` and then pass it to a [Read](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Read) or [Write](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Write) expression. Here is a contrived example
from phi import P
r = P.Ref('r')
assert [600, 3, 6] == P.Pipe(
2,
P + 1, {'a'}, # a = 2 + 1 = 3
P * 2, {'b'}, # b = 3 * 2 = 6
P * 100, {'c', r }, # c = r = 6 * 100 = 600
['c', 'a', 'b']
)
assert r() == 600
**Reading References from the Current Context**
While the expression `Read.a` will return a function that discards its argument and returns the value of the reference `a` in the current context, the expression `Ref.x` will return the value of `x` immediately; this is useful inside Python lambdas.
Read.x(None) <=> Ref.x
As an example
from phi import P, Obj, Ref
assert {'a': 97, 'b': 98, 'c': 99} == P.Pipe(
"a b c", Obj
.split(' ').Write.keys # keys = ['a', 'b', 'c']
.map(ord), # [ord('a'), ord('b'), ord('c')] == [97, 98, 99]
lambda it: zip(Ref.keys, it), # [('a', 97), ('b', 98), ('c', 99)]
dict # {'a': 97, 'b': 98, 'c': 99}
)
"""
return _RefProxyInstance
def Val(self, val, **kwargs):
"""
The expression
Val(a)
is equivalent to the constant function
lambda x: a
All expressions in this module interpret values that are not functions as constant functions using `Val`; for example
Seq(1, P + 1)
is equivalent to
Seq(Val(1), P + 1)
The previous expression as a whole is a constant function since it will return `2` no matter what input you give it.
"""
f = utils.lift(lambda z: val)
return self.__then__(f, **kwargs)
def If(self, condition, *then, **kwargs):
"""
**If**
If(Predicate, *Then)
Conditional expressions being a necessity in every language, Phi includes the `If` expression for such a purpose.
**Arguments**
* **Predicate** : a predicate expression used to determine whether the `Then` or `Else` branches should be used.
* ***Then** : an expression to be executed if the `Predicate` yields `True`. Since this parameter is variadic, you can stack expressions and they will be interpreted as a tuple `phi.dsl.Seq`.
This class also includes the `Elif` and `Else` methods, which let you write branched conditionals in sequence; however, the following rules apply
* If no branch is entered the whole expression behaves like the identity
* `Elif` can only be used after an `If` or another `Elif` expression
* Many `Elif` expressions can be stacked sequentially
* `Else` can only be used after an `If` or `Elif` expression
** Examples **
from phi import P, If
assert "Between 2 and 10" == P.Pipe(
5,
If(P > 10,
"Greater than 10"
).Elif(P < 2,
"Less than 2"
).Else(
"Between 2 and 10"
)
)
"""
cond_f = _parse(condition)._f
then_f = E.Seq(*then)._f
else_f = utils.state_identity
ast = (cond_f, then_f, else_f)
g = _compile_if(ast)
expr = self.__then__(g, **kwargs)
expr._ast = ast
expr._root = self
return expr
def Else(self, *Else, **kwargs):
"""See `phi.dsl.Expression.If`"""
root = self._root
ast = self._ast
next_else = E.Seq(*Else)._f
ast = _add_else(ast, next_else)
g = _compile_if(ast)
return root.__then__(g, **kwargs)
#Else.__doc__ = If.__doc__
def Elif(self, condition, *then, **kwargs):
"""See `phi.dsl.Expression.If`"""
root = self._root
ast = self._ast
cond_f = _parse(condition)._f
then_f = E.Seq(*then)._f
else_f = utils.state_identity
next_else = (cond_f, then_f, else_f)
ast = _add_else(ast, next_else)
g = _compile_if(ast)
expr = root.__then__(g, **kwargs)
expr._ast = ast
expr._root = root
return expr
#Elif.__doc__ = If.__doc__
@staticmethod
def Context(*args):
"""
**Builder Core**. Also available as a global function as `phi.Context`.
Returns the context object of the current `dsl.With` statement.
**Arguments**
* ***args**: By design `Context` accepts any number of arguments and completely ignores them.
This is a static method and it doesn't return a `Builder`/`Expression` by design, so it can be called directly:
from phi import P, Context, Obj
def read_file(z):
f = Context()
return f.read()
lines = P.Pipe(
"text.txt",
P.With( open,
read_file,
Obj.split("\\n")
)
)
Here we called `Context` with no arguments to get the context back, however, since you can also give this function an argument (which it will ignore) it can be passed to the DSL so we can rewrite the previous as:
from phi import P, Context, Obj
lines = P.Pipe(
"text.txt",
P.With( open,
Context, # f
Obj.read(),
Obj.split("\\n")
)
)
`Context` raises an exception when used outside of a `With` block.
**Also see**
* `phi.builder.Builder.Obj`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
"""
if _WithContextManager.WITH_GLOBAL_CONTEXT is utils.NO_VALUE:
raise Exception("Cannot use 'Context' outside of a 'With' block")
return _WithContextManager.WITH_GLOBAL_CONTEXT
###############
## Operators
###############
def __rshift__(self, other):
f = _parse(other)._f
return self.__then__(f)
def __rrshift__(self, prev):
prev = _parse(prev)
return prev.__then__(self._f)
__rlshift__ = __rshift__
__lshift__ = __rrshift__
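# Note: when the left operand of `>>`/`<<` is not an Expression it is parsed
# into the DSL first via the reflected methods, so e.g. `val >> expr` still works.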
## The Rest
def __unit__(self, f, _return_type=None):
"Monadic unit, also known as `return`"
if _return_type:
return _return_type(f)
else:
return self.__class__(f)
def __then__(self, other, **kwargs):
f = self._f
g = other
h = lambda x, state: g(*f(x, state))
return self.__unit__(h, **kwargs)
## Override operators
def __call__(self, __x__, *__return_state__, **state):
x = __x__
return_state = __return_state__
if len(return_state) == 1 and type(return_state[0]) is not bool:
raise Exception("Invalid return state condition, got {return_state}".format(return_state=return_state))
with _StateContextManager(state):
y, next_state = self._f(x, state)
return (y, next_state) if len(return_state) >= 1 and return_state[0] else y
def __getitem__(self, key):
f = utils.lift(lambda x: x[key])
return self.__then__(f)
__add__ = _fmap(operator.add)
__mul__ = _fmap(operator.mul)
__sub__ = _fmap(operator.sub)
__mod__ = _fmap(operator.mod)
__pow__ = _fmap(operator.pow)
__and__ = _fmap(operator.and_)
__or__ = _fmap(operator.or_)
__xor__ = _fmap(operator.xor)
__div__ = _fmap(operator.truediv)
__divmod__ = _fmap(divmod)
__floordiv__ = _fmap(operator.floordiv)
__truediv__ = _fmap(operator.truediv)
__contains__ = _fmap(operator.contains)
__lt__ = _fmap(operator.lt)
__le__ = _fmap(operator.le)
__gt__ = _fmap(operator.gt)
__ge__ = _fmap(operator.ge)
__eq__ = _fmap(operator.eq)
__ne__ = _fmap(operator.ne)
__neg__ = _unary_fmap(operator.neg)
__pos__ = _unary_fmap(operator.pos)
__invert__ = _unary_fmap(operator.invert)
__radd__ = _fmap_flip(operator.add)
__rmul__ = _fmap_flip(operator.mul)
__rsub__ = _fmap_flip(operator.sub)
__rmod__ = _fmap_flip(operator.mod)
__rpow__ = _fmap_flip(operator.pow)
__rdiv__ = _fmap_flip(operator.truediv)
__rdivmod__ = _fmap_flip(divmod)
__rtruediv__ = _fmap_flip(operator.truediv)
__rfloordiv__ = _fmap_flip(operator.floordiv)
__rand__ = _fmap_flip(operator.and_)
__ror__ = _fmap_flip(operator.or_)
__rxor__ = _fmap_flip(operator.xor)
###############
## End
###############
E = Expression()
def _add_else(ast, next_else):
if hasattr(ast, "__call__"):
return next_else
cond, then, Else = ast
return (cond, then, _add_else(Else, next_else))
def _compile_if(ast):
if hasattr(ast, "__call__"):
return ast
cond, then, Else = ast
Else = _compile_if(Else)
def g(x, state):
y_cond, state = cond(x, state)
return then(x, state) if y_cond else Else(x, state)
return g
#######################
### FUNCTIONS
#######################
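# _parse converts any element of the DSL into an Expression, following the rules
# in the module docstring: Expressions pass through unchanged, callables and
# classes are lifted, containers map to List/Tuple/Set/Dict, and anything else
# becomes a constant function via Val.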
def _parse(code):
#if type(code) is tuple:
if isinstance(code, Expression):
return code
elif hasattr(code, '__call__') or isclass(code):
return Expression(utils.lift(code))
elif isinstance(code, list):
return E.List(*code)
elif isinstance(code, tuple):
return E.Tuple(*code)
elif isinstance(code, set):
return E.Set(*code)
elif isinstance(code, dict):
return E.Dict(**code)
else:
return E.Val(code)
|
cgarciae/phi
|
phi/dsl.py
|
Python
|
mit
| 41,557
|
[
"VisIt"
] |
d67d088f9306eca9530393811f62ce91f70994d9b384c980d91131e096a2b2a4
|
from ovito.io import import_file
from ovito.data import CutoffNeighborFinder
# Load input simulation file.
node = import_file("simulation.dump")
data = node.source
# Initialize neighbor finder object:
cutoff = 3.5
finder = CutoffNeighborFinder(cutoff, data)
# Loop over all input particles:
for index in range(data.number_of_particles):
print("Neighbors of particle %i:" % index)
# Iterate over the neighbors of the current particle:
for neigh in finder.find(index):
print(neigh.index, neigh.distance, neigh.delta, neigh.pbc_shift)
|
srinath-chakravarthy/ovito
|
doc/python/example_snippets/cutoff_neighbor_finder.py
|
Python
|
gpl-3.0
| 554
|
[
"OVITO"
] |
173f49f5599c533956944795e2cccc7a490115bf7f474e35cc67fecc73fe1713
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
{%- if cookiecutter.use_async == 'y' %}
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
{%- endif %}
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
{%- if cookiecutter.use_drf == 'y' %}
from rest_framework.authtoken.views import obtain_auth_token
{%- endif %}
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("{{ cookiecutter.project_slug }}.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
{%- if cookiecutter.use_async == 'y' %}
if settings.DEBUG:
# Static file serving when using Gunicorn + Uvicorn for local web socket development
urlpatterns += staticfiles_urlpatterns()
{%- endif %}
{% if cookiecutter.use_drf == 'y' %}
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
]
{%- endif %}
if settings.DEBUG:
# This allows the error pages to be debugged during development: just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
trungdong/cookiecutter-django
|
{{cookiecutter.project_slug}}/config/urls.py
|
Python
|
bsd-3-clause
| 2,353
|
[
"VisIt"
] |
7268e0d85891b890ec164ed12ce35f676ec4e353c4b378686425f1626eb9cc0b
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".tablestyle")
#------------------------------------------------------------------------
#
# TableStyle
#
#------------------------------------------------------------------------
class TableStyle(object):
"""
Specifies the style or format of a table. The TableStyle contains the
characteristics of table width (in percentage of the full width), the
number of columns, and the width of each column as a percentage of the
width of the table.
"""
def __init__(self, obj=None):
"""
Create a new TableStyle object, with the values initialized to
empty, allocating space for up to 100 columns.
@param obj: if not None, then the created object gets its attributes
from the passed object instead of being initialized to empty.
"""
if obj:
self.width = obj.width
self.columns = obj.columns
self.colwid = obj.colwid[:]
else:
self.width = 0
self.columns = 0
self.colwid = [ 0 ] * 100
def set_width(self, width):
"""
Set the width of the table in terms of percent of the available
width
"""
self.width = width
def get_width(self):
"""
Return the specified width as a percentage of the available space
"""
return self.width
def set_columns(self, columns):
"""
Set the number of columns.
@param columns: number of columns that should be used.
"""
self.columns = columns
def get_columns(self):
"""
Return the number of columns
"""
return self.columns
def set_column_widths(self, clist):
"""
Set the width of all the columns at once, taking the percentages
from the passed list.
"""
self.columns = len(clist)
for i in range(self.columns):
self.colwid[i] = clist[i]
def set_column_width(self, index, width):
"""
Set the width of a specified column to the specified width.
@param index: column being set (index starts at 0)
@param width: percentage of the table width assigned to the column
"""
self.colwid[index] = width
def get_column_width(self, index):
"""
Return the column width of the specified column as a percentage of
the entire table width.
@param index: column to return (index starts at 0)
"""
return self.colwid[index]
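# A minimal usage sketch (illustrative only, not part of the original API
# documentation): a three-column table taking 80% of the available width.
#
# style = TableStyle()
# style.set_width(80)
# style.set_column_widths([30, 30, 40])
# assert style.get_columns() == 3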
#------------------------------------------------------------------------
#
# TableCellStyle
#
#------------------------------------------------------------------------
class TableCellStyle(object):
"""
Defines the style of a particular table cell. Characteristics are:
right border, left border, top border, bottom border, and padding.
"""
def __init__(self, obj=None):
"""
Create a new TableCellStyle instance.
@param obj: if not None, specifies that the values should be
copied from the passed object instead of being initialized to empty.
"""
if obj:
self.rborder = obj.rborder
self.lborder = obj.lborder
self.tborder = obj.tborder
self.bborder = obj.bborder
self.padding = obj.padding
self.longlist = obj.longlist
else:
self.rborder = 0
self.lborder = 0
self.tborder = 0
self.bborder = 0
self.padding = 0
self.longlist = 0
def set_padding(self, val):
"Return the cell padding in centimeters"
self.padding = val
def set_borders(self, val):
"""
Defines if a border is used
@param val: if True, a border is used, if False, it is not
"""
self.rborder = val
self.lborder = val
self.tborder = val
self.bborder = val
def set_right_border(self, val):
"""
Defines if a right border is used
@param val: if True, a right border is used, if False, it is not
"""
self.rborder = val
def set_left_border(self, val):
"""
Defines if a left border is used
@param val: if True, a left border is used, if False, it is not
"""
self.lborder = val
def set_top_border(self, val):
"""
Defines if a top border is used
@param val: if True, a top border is used, if False, it is not
"""
self.tborder = val
def set_bottom_border(self, val):
"""
Defines if a bottom border is used
@param val: if True, a bottom border is used, if False, it is not
"""
self.bborder = val
def set_longlist(self, val):
self.longlist = val
def get_padding(self):
"Return the cell padding in centimeters"
return self.padding
def get_right_border(self):
"Return 1 if a right border is requested"
return self.rborder
def get_left_border(self):
"Return 1 if a left border is requested"
return self.lborder
def get_top_border(self):
"Return 1 if a top border is requested"
return self.tborder
def get_bottom_border(self):
"Return 1 if a bottom border is requested"
return self.bborder
def get_longlist(self):
return self.longlist
|
Forage/Gramps
|
gramps/gen/plug/docgen/tablestyle.py
|
Python
|
gpl-2.0
| 6,977
|
[
"Brian"
] |
f50d33fc06d844a295f0661b90e4709a959abe51a89b11b53440c35e5365454b
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.compat import compat
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session(use_gpu=True):
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a two dimensional
# input is used, i.e. the images have shape [height, width]
with self.cached_session(use_gpu=True):
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "A grayscale image must be at least two-dimensional"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
@test_util.run_deprecated_v1
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
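# A minimal, vectorized NumPy sketch of the reference conversion that
# _RGBToGrayscale above computes pixel by pixel; the weights are the
# ITU-R BT.601 luma coefficients. This helper is illustrative only and is
# not used by the tests.
def _rgb_to_grayscale_reference_np(images):
  weights = np.array([0.2989, 0.5870, 0.1140])
  # Contract the channel axis against the luma weights, then restore a
  # trailing channel dimension of size 1; astype(np.uint8) truncates like
  # the int() cast in the loop above.
  gray = np.tensordot(images.astype(np.float64), weights, axes=([-1], [0]))
  return np.expand_dims(gray, axis=-1).astype(np.uint8)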
class AdjustGamma(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.adjust_gamma(x, gamma=-1)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.adjust_gamma(x, gamma=-1)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
image = image_ops.adjust_gamma(x, gamma=y)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(errors.InvalidArgumentError, err_msg):
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(y.eval())
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = y.eval()
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
@test_util.run_deprecated_v1
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
@test_util.run_deprecated_v1
  def test_adjust_gamma_one_uint8(self):
    """Same image should be returned for gamma equal to one (uint8)."""
    self._test_adjust_gamma_uint8(1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
@test_util.run_deprecated_v1
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
@test_util.run_deprecated_v1
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
self._test_adjust_gamma_float32(1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
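# The uint8 gamma tests above compare against a NumPy reference; a compact
# sketch of that computation (normalize to [0, 1], apply the power law,
# rescale, truncate). Illustrative only, mirroring _test_adjust_gamma_uint8.
def _gamma_correct_uint8_np(x_np, gamma):
  y = np.power(x_np / 255.0, gamma)
  return np.trunc(np.clip(y * 255.5, 0, 255.0))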
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
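# _adjustHueNp above rotates hue per pixel through HSV space; a compact
# sketch of the same transform for a single RGB triple, using the stdlib
# colorsys module. Illustrative only; the "+ 10.0" keeps the fmod result
# positive when delta_h is negative.
def _rotate_hue_np(rgb, delta_h):
  h, s, v = colorsys.rgb_to_hsv(*rgb)
  h = math.fmod(h + delta_h + 10.0, 1.0)
  return colorsys.hsv_to_rgb(h, s, v)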
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
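# The benchmarks above all share one timing pattern: run a fixed number of
# warmup rounds so graph setup and caching effects are excluded, then time
# only the steady-state rounds. A minimal sketch of that pattern, with
# run_fn as a hypothetical zero-argument callable; it is equivalent to the
# inline "if i == warmup_rounds: start = time.time()" variant used above.
def _time_steady_state(run_fn, warmup_rounds, benchmark_rounds):
  for _ in xrange(warmup_rounds):
    run_fn()
  start = time.time()
  for _ in xrange(benchmark_rounds):
    run_fn()
  return (time.time() - start) / benchmark_rounds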
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
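# ResizeBilinearBenchmark (and the bicubic/area benchmarks below) amortize
# per-step session overhead by chaining num_ops copies of the resize through
# control dependencies, which forces them to run sequentially inside a
# single session step. A minimal sketch of the chaining pattern, with
# make_op as a hypothetical zero-argument callable that creates one op:
def _chain_sequentially(make_op, num_ops):
  deps = []
  for _ in xrange(num_ops):
    # Each new op may only start after the previous one has finished.
    with ops.control_dependencies(deps):
      deps = [make_op()]
  return control_flow_ops.group(*deps)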
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
@test_util.run_deprecated_v1
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session(use_gpu=True):
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = image_ops.adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
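# _adjustSaturationNp above scales saturation per pixel in HSV space; the
# core of the transform for one RGB triple is just a clamped multiply on
# the S channel. Illustrative sketch only:
def _scale_saturation_np(rgb, scale):
  h, s, v = colorsys.rgb_to_hsv(*rgb)
  s = min(1.0, max(0.0, s * scale))
  return colorsys.hsv_to_rgb(h, s, v)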
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
self.assertTrue(y.op.name.startswith("flip_left_right"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
@test_util.run_deprecated_v1
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
self.assertTrue(y.op.name.startswith("flip_up_down"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
@test_util.run_deprecated_v1
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
self.assertTrue(y.op.name.startswith("transpose"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
    # Ops that support 3-D input.
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
    # Ops that support 4-D input.
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegexp(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
@test_util.run_deprecated_v1
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
@test_util.run_deprecated_v1
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
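# The random-flip tests above accept any flip count above a six-sigma lower
# bound on a Binomial(n, 0.5) count, where n is trials times batch size.
# A sketch of that bound, matching the in-test arithmetic (100 trials give
# 50 - 6 * 5 = 20; 100 trials of batch 16 give 800 - 6 * 20 = 680):
def _six_sigma_lower_bound(num_trials, batch_size=1):
  n = num_trials * batch_size
  mean = 0.5 * n
  stddev = 0.5 * np.sqrt(n)
  return mean - 6.0 * stddev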
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegexp(
ValueError, 'Shape must be rank 0 but is rank 1'):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
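# adjust_brightness is verified above against hand-computed tables; for
# uint8 inputs the op is conceptually a saturating add in the [0, 1] float
# representation. A hedged NumPy sketch of that reference (the rounding
# mode is an assumption here, though it is exact for the deltas used in
# these tests):
def _adjust_brightness_uint8_np(x_np, delta):
  y = x_np.astype(np.float32) / 255.0 + delta
  return np.round(np.clip(y, 0.0, 1.0) * 255.0).astype(np.uint8)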
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
    # Floor the stddev at 1/sqrt(num_pixels) so a uniform image does not
    # divide by zero (see testUniformImage below).
    stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
@test_util.run_deprecated_v1
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
self.assertTrue(y.op.name.startswith("per_image_standardization"))
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session(use_gpu=True):
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session(use_gpu=True):
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
def testPreservesDtype(self):
imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8)
imgs_tfu8 = constant_op.constant(imgs_npu8)
whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8)
self.assertEqual(whiten_tfu8.dtype, dtypes.uint8)
imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16)
imgs_tff16 = constant_op.constant(imgs_npf16)
whiten_tff16 = image_ops.per_image_standardization(imgs_tff16)
self.assertEqual(whiten_tff16.dtype, dtypes.float16)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
@test_util.run_deprecated_v1
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is
      # arbitrary. Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (
        ([-1, 0, 3, 3], "offset_height must be >= 0"),
        ([0, -1, 3, 3], "offset_width must be >= 0"),
        ([0, 0, 0, 3], "target_height must be > 0"),
        ([0, 0, 3, 0], "target_width must be > 0"),
        ([2, 0, 3, 3], "height must be >= target + offset"),
        ([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
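# For an in-bounds box, crop_to_bounding_box on a 3-D image is equivalent
# to a plain NumPy slice on the height and width axes. Illustrative sketch,
# not used by the tests:
def _crop_to_bounding_box_np(image, offset_height, offset_width,
                             target_height, target_width):
  return image[offset_height:offset_height + target_height,
               offset_width:offset_width + target_width, :]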
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
@test_util.run_deprecated_v1
def testCropping2(self):
    # Test case for GitHub issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
y = image_ops.central_crop(x, 0.33)
y_tf = y.eval(feed_dict={x: x_np})
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
    # Test fraction=0.5 (a genuine crop), with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
    # Test fraction=0.5 (a genuine crop), with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
@test_util.run_deprecated_v1
def testNameScope(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session(use_gpu=True):
self.assertAllClose(y, self.evaluate(y_tf))
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
@test_util.run_deprecated_v1
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain back slashes. However,
      # they are added by either the assert op or the runtime. If this
      # behavior changes in the future, the match string will also need to
      # be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
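# Tests for sample_distorted_bounding_box. The op is stochastic, so these
# tests draw many samples and check aggregate properties (aspect ratio,
# area ratio, fraction of the object covered) instead of exact outputs.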
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.cached_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Repeat the sampling with min_object_covered passed as a tensor.
min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_placeholder,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval(feed_dict={
min_object_covered_placeholder: min_object_covered
})
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op should
# be fixed to not use rejection sampling and to generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
# For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
@test_util.run_deprecated_v1
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testSampleDistortedBoundingBoxShape(self):
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# min_object_covered defaults to 0.1 when not provided.
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
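# Tests for resize_images_v2 (the TF2-style resize with the ResizeMethod
# enum and half-pixel centers).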
class ResizeImagesV2Test(test_util.TensorFlowTestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
# Some resize methods, such as Gaussian, are non-interpolating: they change
# the image even when there is no scale change. For some tests we therefore
# only check values for the value-preserving (interpolating) methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
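# Gates the explicit GPU runs below: only nearest-neighbor resize on float
# inputs is exercised on the GPU in these tests.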
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers not supported by XLA.
@test_util.run_deprecated_v1
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True) as sess:
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test a size where the width is specified as a tensor that is the sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
for legacy_method, new_method in methods_to_test:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
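# AREA resize averages the input pixels overlapping each output cell. For
# example, output (0, 0) covers input rows [0, 1.5) x cols [0, 1.5), so its
# value is (1*128 + 0.5*64 + 0.5*4 + 0.25*8) / 2.25 ~= 73.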
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images_v2(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images_v2(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(
x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
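# Tests for the V1 resize_images API (ResizeMethodV1, align_corners).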
class ResizeImagesTest(test_util.TensorFlowTestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
@test_util.run_deprecated_v1
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True) as sess:
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethodV1.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test a size where the width is specified as a tensor that is the sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(x_tensor, target_max,
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
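# resize_image_with_pad resizes while preserving the aspect ratio, then
# zero-pads evenly to the exact target size.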
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
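# A minimal worked example, assuming the V1 (align_corners=False) bilinear
# kernel: in the first case the 2x4 input is resized to 1x2 to preserve
# the aspect ratio, which samples pixels 1 and 3, and is then zero-padded
# evenly to 1x4.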
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
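# Same cases as the V1 test above, but the V2 half-pixel-centers bilinear
# kernel averages neighborhoods: resizing the 2x4 input to 1x2 samples at
# half-pixel centers, giving (1+2+5+6)/4 = 3.5 and (3+4+7+8)/4 = 5.5
# before padding.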
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
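# resize_image_with_crop_or_pad center-crops or zero-pads to the target
# size without any rescaling.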
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
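# Center padding puts floor((target - input) / 2) zeros before the image
# and the remainder after, so odd padding adds the extra zero row/column
# at the bottom/right.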
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testCrop(self):
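# Center cropping drops floor((input - target) / 2) rows/columns from the
# top/left, so odd cropping removes the extra row/column from the
# bottom/right.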
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain backslashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
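# JPEG round-trip tests. JPEG is lossy, so decoded images are compared via
# average per-pixel error rather than exact equality.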
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Read a real JPEG, then compare decode-then-crop with fused decode_and_crop.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
@test_util.run_deprecated_v1
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Read a real JPEG and verify that invalid crop windows are rejected.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
lambda e: "Invalid JPEG data or crop window" in str(e)):
self.evaluate(result)
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session(use_gpu=True) as sess:
# Compare decoding with both dct_method=INTEGER_FAST and the
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
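# channels=0 means "infer from the data", so the static channel
# dimension is unknown (None); otherwise it is the requested count.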
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
@test_util.run_deprecated_v1
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
self.assertEqual(image_shape.tolist(), [256, 128, 3])
@test_util.run_deprecated_v1
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
# Cmyk jpeg image has 4 channels.
self.assertEqual(image_shape.tolist(), [256, 128, 4])
def testRandomJpegQuality(self):
# A previous implementation of random_jpeg_quality had a bug. This unit
# test exercises the fixed version, but due to forward compatibility the
# test can only run when the fixed version is in use.
if compat.forward_compatible(2019, 4, 4):
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session(use_gpu=True) as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
def testAdjustJpegQuality(self):
# Test that image_ops.adjust_jpeg_quality works when the jpeg quality
# is an int (not a tensor) for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session(use_gpu=True) as sess:
sess.run(adjust_jpeg_quality_image)
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
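# Each frame contains a white 5-pixel stripe: it sweeps left-to-right
# across the 20-pixel width in the first 4 frames, then top-to-bottom
# across the 40-pixel height in the remaining 8 frames.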
with self.cached_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.cached_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
@test_util.run_deprecated_v1
def testConvertBetweenInteger(self):
# Make sure converting between integer types scales appropriately
with self.cached_session(use_gpu=True):
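# Integer-to-integer conversion scales by the ratio of the types'
# ranges: upcasts multiply by (target_max + 1) // (source_max + 1)
# (e.g. 32768 // 256 = 128 for uint8 -> int16); downcasts divide by
# the same factor, which matches the expected values below.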
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
@test_util.run_deprecated_v1
def testConvertBetweenFloat(self):
# Make sure converting between float types does nothing interesting
with self.cached_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
@test_util.run_deprecated_v1
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.cached_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
@test_util.run_deprecated_v1
def testConvertBetweenInt16AndInt8(self):
with self.cached_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be floating-point numbers, so we only test
for approximate equality within a narrow error bound.
"""
# Create a TensorFlow session.
with self.cached_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum over all axes.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum over the last 3 axes.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code, so fail loudly if it does.
raise ValueError("x_np must have 3 or 4 dimensions.")
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
# Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
# Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = io_ops.read_file(os.path.join(prefix, path)).eval()
images = {}
for name, decode in decoders.items():
image = decode(contents).eval()
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNonMaxSuppression(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
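# The six boxes form three spatial clusters (indices 0-2, 3-4 and 5);
# greedy NMS keeps the highest-scoring box from each cluster:
# 3 (0.95), 0 (0.9) and 5 (0.3).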
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
@test_util.run_deprecated_v1
def testInvalidShape(self):
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The boxes are of shape [num_boxes, 4] and the scores of shape
# [num_boxes], so mismatched counts raise an error.
with self.assertRaisesRegexp(ValueError,
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The max_output_size should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, [3], 0.5)
# The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
@test_util.run_deprecated_v1
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression: v2, v3, v4, v5.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 1.0
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@test_util.run_deprecated_v1
def testSelectFromContinuousOverLap(self):
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
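# Adjacent boxes overlap with IoU = 0.8 / 1.2 ~= 0.67 (> 0.5), while
# boxes two apart overlap with IoU = 0.6 / 1.4 ~= 0.43 (< 0.5), so
# greedy NMS keeps every other box: 0, 2, 4.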
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
@test_util.run_deprecated_v1
def testWrongDims(self):
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
@test_util.run_deprecated_v1
def testShapeMismatch(self):
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
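# Add a leading batch dimension so the image has the
# [batch, height, width, channels] layout used by the batched tests.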
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval()
tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval()
tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval()
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
@test_util.run_deprecated_v1
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session(use_gpu=True):
self.assertAllClose(
psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
ssim = image_ops.ssim(
*ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [ssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
@test_util.run_deprecated_v1
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertLess(ssim.eval(), 0)
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
msssim = image_ops.ssim_multiscale(
*ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [msssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
@test_util.run_deprecated_v1
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
scaled_ph = [x * scalar for x in ph]
msssim = image_ops.ssim_multiscale(
*scaled_ph,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
grads = gradients.gradients(msssim, scalar)
with self.cached_session(use_gpu=True) as sess:
np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores over various scales.
If any of the values is negative, so that the geometric mean is not
well-defined, the MS-SSIM score is treated as zero.
"""
with self.cached_session(use_gpu=True) as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session(use_gpu=True):
_ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
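# image_gradients computes forward differences along height (dy) and
# width (dx), zero-padding the last row / column so the outputs keep
# the input shape.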
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session(use_gpu=True):
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
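# sobel_edges appends a trailing axis of size 2 holding the per-pixel
# responses of the two Sobel kernels (one per gradient direction).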
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
class DecodeImageTest(test_util.TensorFlowTestCase):
def testJpegUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
# With expand_animations=False, decode_image returns only the first
# frame of the GIF as a 3-D tensor.
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertEqual(len(image0.shape), 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
if __name__ == "__main__":
googletest.main()
|
ghchinoy/tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
Python
|
apache-2.0
| 202,288
|
[
"Gaussian"
] |
1d2a94725a61d62325eb1d666637b839e0421a481968c080b6640e1c8dc48a3d
|
"""
Acceptance tests for Studio related to the container page.
The container page is used both for displaying units, and
for displaying containers within units.
"""
import datetime
import ddt
from base_studio_test import ContainerBase
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.pages.studio.xblock_editor import XBlockEditorView, XBlockVisibilityEditorView
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.html_component_editor import HtmlXBlockEditorView
from common.test.acceptance.pages.studio.move_xblock import MoveModalView
from common.test.acceptance.pages.studio.utils import add_discussion
from common.test.acceptance.tests.helpers import create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
class NestedVerticalTest(ContainerBase):
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
self.container_title = ""
self.group_a = "Group A"
self.group_b = "Group B"
self.group_empty = "Group Empty"
self.group_a_item_1 = "Group A Item 1"
self.group_a_item_2 = "Group A Item 2"
self.group_b_item_1 = "Group B Item 1"
self.group_b_item_2 = "Group B Item 2"
self.group_a_handle = 0
self.group_a_item_1_handle = 1
self.group_a_item_2_handle = 2
self.group_empty_handle = 3
self.group_b_handle = 4
self.group_b_item_1_handle = 5
self.group_b_item_2_handle = 6
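# Handle and action indices follow the flattened display order of the
# nested xblocks defined above.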
self.group_a_item_1_action_index = 0
self.group_a_item_2_action_index = 1
self.duplicate_label = u"Duplicate of '{0}'"
self.discussion_label = "Discussion"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', 'Test Container').add_children(
XBlockFixtureDesc('vertical', 'Group A').add_children(
XBlockFixtureDesc('html', self.group_a_item_1),
XBlockFixtureDesc('html', self.group_a_item_2)
),
XBlockFixtureDesc('vertical', 'Group Empty'),
XBlockFixtureDesc('vertical', 'Group B').add_children(
XBlockFixtureDesc('html', self.group_b_item_1),
XBlockFixtureDesc('html', self.group_b_item_2)
)
)
)
)
)
)
@attr(shard=1)
class AddComponentTest(NestedVerticalTest):
"""
Tests of adding a component to the container page.
"""
def add_and_verify(self, menu_index, expected_ordering):
self.do_action_and_verify(
lambda container: add_discussion(container, menu_index),
expected_ordering
)
def test_add_component_in_group(self):
group_b_menu = 2
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
{self.group_empty: []}]
self.add_and_verify(group_b_menu, expected_ordering)
def test_add_component_in_empty_group(self):
group_empty_menu = 1
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: [self.discussion_label]}]
self.add_and_verify(group_empty_menu, expected_ordering)
def test_add_component_in_container(self):
container_menu = 3
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.add_and_verify(container_menu, expected_ordering)
@attr(shard=1)
class DuplicateComponentTest(NestedVerticalTest):
"""
Tests of duplicating a component on the container page.
"""
def duplicate_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
lambda container: container.duplicate(source_index),
expected_ordering
)
def test_duplicate_first_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_1)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)
def test_duplicate_second_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_2)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)
def test_duplicate_the_duplicate(self):
first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
second_duplicate_label = self.duplicate_label.format(first_duplicate_label)
expected_ordering = [
{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}
]
def duplicate_twice(container):
container.duplicate(self.group_a_item_1_action_index)
container.duplicate(self.group_a_item_1_action_index + 1)
self.do_action_and_verify(duplicate_twice, expected_ordering)
@attr(shard=1)
class DeleteComponentTest(NestedVerticalTest):
"""
Tests of deleting a component from the container page.
"""
def delete_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
lambda container: container.delete(source_index),
expected_ordering
)
def test_delete_first_in_group(self):
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
# Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
group_a_item_1_delete_index = 1
self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr(shard=16)
class EditContainerTest(NestedVerticalTest):
"""
Tests of editing a container.
"""
def modify_display_name_and_verify(self, component):
"""
Helper method for changing a display name.
"""
modified_name = 'modified'
self.assertNotEqual(component.name, modified_name)
component.edit()
component_editor = XBlockEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Display Name', modified_name)
self.assertEqual(component.name, modified_name)
def test_edit_container_on_unit_page(self):
"""
Test the "edit" button on a container appearing on the unit page.
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
self.modify_display_name_and_verify(component)
def test_edit_container_on_container_page(self):
"""
Test the "edit" button on a container appearing on the container page.
"""
container = self.go_to_nested_container_page()
self.modify_display_name_and_verify(container)
class BaseGroupConfigurationsTest(ContainerBase):
ALL_LEARNERS_AND_STAFF = XBlockVisibilityEditorView.ALL_LEARNERS_AND_STAFF
CHOOSE_ONE = "Select a group type"
CONTENT_GROUP_PARTITION = XBlockVisibilityEditorView.CONTENT_GROUP_PARTITION
ENROLLMENT_TRACK_PARTITION = XBlockVisibilityEditorView.ENROLLMENT_TRACK_PARTITION
MISSING_GROUP_LABEL = 'Deleted Group\nThis group no longer exists. Choose another group or remove the access restriction.'
VALIDATION_ERROR_LABEL = 'This component has validation issues.'
VALIDATION_ERROR_MESSAGE = "Error:\nThis component's access settings refer to deleted or invalid groups."
GROUP_VISIBILITY_MESSAGE = 'Access to some content in this unit is restricted to specific groups of learners.'
MODAL_NOT_RESTRICTED_MESSAGE = "Access is not restricted"
def setUp(self):
super(BaseGroupConfigurationsTest, self).setUp()
# Set up a cohort-schemed user partition
self.id_base = MINIMUM_STATIC_PARTITION_ID
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
self.id_base,
self.CONTENT_GROUP_PARTITION,
'Content Group Partition',
[
Group(self.id_base + 1, 'Dogs'),
Group(self.id_base + 2, 'Cats')
],
scheme="cohort"
)
],
},
})
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
def populate_course_fixture(self, course_fixture):
"""
        Populate a simple course with a section, subsection, unit, and HTML component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Component')
)
)
)
)
def edit_component_visibility(self, component):
"""
        Edit the visibility of an xblock on the container page and return an XBlockVisibilityEditorView.
"""
component.edit_visibility()
return XBlockVisibilityEditorView(self.browser, component.locator)
def edit_unit_visibility(self, unit):
"""
        Edit the visibility of a unit on the container page and return an XBlockVisibilityEditorView.
"""
unit.edit_visibility()
return XBlockVisibilityEditorView(self.browser, unit.locator)
def verify_current_groups_message(self, visibility_editor, expected_current_groups):
"""
Check that the current visibility is displayed at the top of the dialog.
"""
if expected_current_groups == self.ALL_LEARNERS_AND_STAFF:
self.assertEqual("Access is not restricted", visibility_editor.current_groups_message)
else:
self.assertEqual(
u"Access is restricted to: {groups}".format(groups=expected_current_groups),
visibility_editor.current_groups_message
)
def verify_selected_partition_scheme(self, visibility_editor, expected_scheme):
"""
Check that the expected partition scheme is selected.
"""
self.assertItemsEqual(expected_scheme, visibility_editor.selected_partition_scheme)
def verify_selected_groups(self, visibility_editor, expected_groups):
"""
Check the expected partition groups.
"""
self.assertItemsEqual(expected_groups, [group.text for group in visibility_editor.selected_groups])
def select_and_verify_saved(self, component, partition_label, groups=[]):
"""
Edit the visibility of an xblock on the container page and
verify that the edit persists. Note that `groups`
are labels which should be clicked, but not necessarily checked.
"""
# Make initial edit(s) and save
visibility_editor = self.edit_component_visibility(component)
visibility_editor.select_groups_in_partition_scheme(partition_label, groups)
        # Re-open the modal and inspect its selected inputs. If no groups were selected,
        # the scheme select should show "Select a group type", i.e. access is not restricted.
if not groups:
partition_label = self.CHOOSE_ONE
visibility_editor = self.edit_component_visibility(component)
self.verify_selected_partition_scheme(visibility_editor, partition_label)
self.verify_selected_groups(visibility_editor, groups)
visibility_editor.save()
def select_and_verify_unit_group_access(self, unit, partition_label, groups=[]):
"""
Edit the visibility of an xblock on the unit page and
verify that the edit persists. Note that `groups`
are labels which should be clicked, but are not necessarily checked.
"""
unit_access_editor = self.edit_unit_visibility(unit)
unit_access_editor.select_groups_in_partition_scheme(partition_label, groups)
if not groups:
partition_label = self.CHOOSE_ONE
unit_access_editor = self.edit_unit_visibility(unit)
self.verify_selected_partition_scheme(unit_access_editor, partition_label)
self.verify_selected_groups(unit_access_editor, groups)
unit_access_editor.save()
def verify_component_validation_error(self, component):
"""
Verify that we see validation errors for the given component.
"""
self.assertTrue(component.has_validation_error)
self.assertEqual(component.validation_error_text, self.VALIDATION_ERROR_LABEL)
self.assertEqual([self.VALIDATION_ERROR_MESSAGE], component.validation_error_messages)
def verify_visibility_set(self, component, is_set):
"""
Verify that the container page shows that component visibility
settings have been edited if `is_set` is True; otherwise
verify that the container page shows no such information.
"""
if is_set:
self.assertIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertTrue(component.has_group_visibility_set)
else:
self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertFalse(component.has_group_visibility_set)
def verify_unit_visibility_set(self, unit, set_groups=[]):
"""
Verify that the container visibility modal shows that unit visibility
settings have been edited if there are `set_groups`. Otherwise verify
that the modal shows no such information.
"""
unit_access_editor = self.edit_unit_visibility(unit)
if set_groups:
self.assertIn(", ".join(set_groups), unit_access_editor.current_groups_message)
else:
self.assertEqual(self.MODAL_NOT_RESTRICTED_MESSAGE, unit_access_editor.current_groups_message)
unit_access_editor.cancel()
def update_component(self, component, metadata):
"""
Update a component's metadata and refresh the page.
"""
self.course_fixture._update_xblock(component.locator, {'metadata': metadata})
self.browser.refresh()
self.container_page.wait_for_page()
def remove_missing_groups(self, visibility_editor, component):
"""
Deselect the missing groups for a component. After save,
verify that there are no missing group messages in the modal
and that there is no validation error on the component.
"""
for option in visibility_editor.all_group_options:
if option.text == self.MISSING_GROUP_LABEL:
option.click()
visibility_editor.save()
visibility_editor = self.edit_component_visibility(component)
self.assertNotIn(self.MISSING_GROUP_LABEL, [item.text for item in visibility_editor.all_group_options])
visibility_editor.cancel()
self.assertFalse(component.has_validation_error)
@attr(shard=21)
class UnitAccessContainerTest(BaseGroupConfigurationsTest):
"""
Tests unit level access
"""
GROUP_RESTRICTED_MESSAGE = 'Access to this unit is restricted to: Dogs'
def _toggle_container_unit_access(self, group_ids, unit):
"""
Toggle the unit level access on the course outline page
"""
unit.toggle_unit_access('Content Groups', group_ids)
def _verify_container_unit_access_message(self, group_ids, expected_message):
"""
Check that the container page displays the correct unit
access message.
"""
self.outline.visit()
self.outline.expand_all_subsections()
unit = self.outline.section_at(0).subsection_at(0).unit_at(0)
self._toggle_container_unit_access(group_ids, unit)
container_page = self.go_to_unit_page()
self.assertEqual(str(container_page.get_xblock_access_message()), expected_message)
def test_default_selection(self):
"""
Tests that no message is displayed when there are no
restrictions on the unit or components.
"""
self._verify_container_unit_access_message([], '')
def test_restricted_components_message(self):
"""
Test that the proper message is displayed when access to
some components is restricted.
"""
container_page = self.go_to_unit_page()
html_component = container_page.xblocks[1]
# Initially set visibility to Dog group.
self.update_component(
html_component,
{'group_access': {self.id_base: [self.id_base + 1]}}
)
self._verify_container_unit_access_message([], self.GROUP_VISIBILITY_MESSAGE)
def test_restricted_access_message(self):
"""
Test that the proper message is displayed when access to the
unit is restricted to a particular group.
"""
self._verify_container_unit_access_message([self.id_base + 1], self.GROUP_RESTRICTED_MESSAGE)
@attr(shard=9)
class ContentGroupVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (content groups).
"""
def test_default_selection(self):
"""
Scenario: The component visibility modal selects visible to all by default.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
Then the default visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
visibility_dialog = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_dialog, self.ALL_LEARNERS_AND_STAFF)
self.verify_selected_partition_scheme(visibility_dialog, self.CHOOSE_ONE)
visibility_dialog.cancel()
self.verify_visibility_set(self.html_component, False)
def test_reset_to_all_students_and_staff(self):
"""
Scenario: The component visibility modal can be set to be visible to all students and staff.
Given I have a unit with one component
When I go to the container page for that unit
Then the container page should not display the content visibility warning by default.
If I then restrict access and save, and then I open the visibility editor modal for that unit's component
And I select 'All Students and Staff'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should still not display the content visibility warning
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
self.verify_visibility_set(self.html_component, False)
def test_reset_unit_access_to_all_students_and_staff(self):
"""
Scenario: The unit visibility modal can be set to be visible to all students and staff.
Given I have a unit
When I go to the container page for that unit
And I open the visibility editor modal for that unit
And I select 'Dogs'
And I save the modal
        Then when I re-open the modal, it should display the content visibility settings
        And when I re-open the modal again
        And I select 'All Learners and Staff'
        And I save the modal
        Then when I re-open the modal, it should display that no content is restricted
"""
self.select_and_verify_unit_group_access(self.container_page, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.verify_unit_visibility_set(self.container_page, set_groups=["Dogs"])
self.select_and_verify_unit_group_access(self.container_page, self.ALL_LEARNERS_AND_STAFF)
self.verify_unit_visibility_set(self.container_page)
def test_select_single_content_group(self):
"""
Scenario: The component visibility modal can be set to be visible to one content group.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs'
And I save the modal
Then the visibility selection should be 'Dogs' and 'Specific Content Groups'
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
def test_select_multiple_content_groups(self):
"""
Scenario: The component visibility modal can be set to be visible to multiple content groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs' and 'Cats'
And I save the modal
Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups'
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs', 'Cats'])
def test_select_zero_content_groups(self):
"""
Scenario: The component visibility modal can not be set to be visible to 'Specific Content Groups' without
selecting those specific groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Specific Content Groups'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
self.select_and_verify_saved(
self.html_component, self.CONTENT_GROUP_PARTITION
)
self.verify_visibility_set(self.html_component, False)
def test_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And the container page should display the content visibility warning
And I de-select the missing groups
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And I should not see any validation errors on the component
And the container page should not display the content visibility warning
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
"Deleted Group, Deleted Group",
[self.MISSING_GROUP_LABEL] * 2
)
self.verify_visibility_set(self.html_component, False)
def test_found_and_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids and multiple known group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid and valid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And then if I de-select the missing groups
And I save the modal
Then the visibility selection should be the names of the valid groups.
And I should not see any validation errors on the component
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 1, self.id_base + 2, self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
'Dogs, Cats, Deleted Group, Deleted Group',
['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2
)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
expected_groups = ['Dogs', 'Cats']
self.verify_current_groups_message(visibility_editor, ", ".join(expected_groups))
self.verify_selected_groups(visibility_editor, expected_groups)
def _verify_and_remove_missing_content_groups(self, current_groups_message, all_group_labels):
self.verify_component_validation_error(self.html_component)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
self.verify_current_groups_message(visibility_editor, current_groups_message)
self.verify_selected_groups(visibility_editor, all_group_labels)
self.remove_missing_groups(visibility_editor, self.html_component)
@attr(shard=3)
class EnrollmentTrackVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (enrollment tracks).
"""
AUDIT_TRACK = "Audit Track"
VERIFIED_TRACK = "Verified Track"
def setUp(self):
super(EnrollmentTrackVisibilityModalTest, self).setUp()
# Add an audit mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'audit', mode_display_name=self.AUDIT_TRACK).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified',
mode_display_name=self.VERIFIED_TRACK, min_price=10
).visit()
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
# Initially set visibility to Verified track.
self.update_component(
self.html_component,
{'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [2]}} # "2" is Verified
)
    def verify_component_group_visibility_message(self, component, expected_groups):
"""
Verifies that the group visibility message below the component display name is correct.
"""
if not expected_groups:
self.assertIsNone(component.get_partition_group_message)
else:
self.assertEqual("Access restricted to: " + expected_groups, component.get_partition_group_message)
def test_setting_enrollment_tracks(self):
"""
Test that enrollment track groups can be selected.
"""
# Verify that the "Verified" Group is shown on the unit page (under the unit display name).
        self.verify_component_group_visibility_message(self.html_component, "Verified Track")
# Open dialog with "Verified" already selected.
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_editor, self.VERIFIED_TRACK)
self.verify_selected_partition_scheme(
visibility_editor,
self.ENROLLMENT_TRACK_PARTITION
)
self.verify_selected_groups(visibility_editor, [self.VERIFIED_TRACK])
visibility_editor.cancel()
# Select "All Learners and Staff". The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
        self.verify_component_group_visibility_message(self.html_component, None)
# Select "Audit" enrollment track. The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ENROLLMENT_TRACK_PARTITION, [self.AUDIT_TRACK])
        self.verify_component_group_visibility_message(self.html_component, "Audit Track")
@attr(shard=16)
class UnitPublishingTest(ContainerBase):
"""
Tests of the publishing control and related widgets on the Unit page.
"""
PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)"
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
LOCKED_STATUS = "Publishing Status\nVisible to Staff Only"
RELEASE_TITLE_RELEASED = "RELEASED:"
RELEASE_TITLE_RELEASE = "RELEASE:"
LAST_PUBLISHED = 'Last published'
LAST_SAVED = 'Draft saved on'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with a unit and a single HTML child.
"""
self.html_content = '<p><strong>Body of HTML Unit.</strong></p>'
self.courseware = CoursewarePage(self.browser, self.course_id)
past_start_date = datetime.datetime(1974, 6, 22)
self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC"
future_start_date = datetime.datetime(2100, 9, 13)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test html', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unlocked Section',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content)
)
)
),
XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children(
XBlockFixtureDesc(
'sequential',
'Subsection With Locked Unit',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc(
'vertical',
'Locked Unit',
metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('discussion', '', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unreleased Section',
metadata={'start': future_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unreleased Unit')
)
)
)
def test_publishing(self):
"""
Scenario: The publish title changes based on whether or not draft content exists
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the title in the Publish information box is "Published and Live"
And the Publish button is disabled
And the last published text contains "Last published"
And the last saved text contains "Last published"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the last saved text contains "Draft saved on"
And the Publish button is enabled
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And the last published text contains "Last published"
And the last saved text contains "Last published"
"""
unit = self.go_to_unit_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
# Start date set in course fixture to 1970.
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"'
)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
# Should not be able to click on Publish action -- but I don't know how to test that it is not clickable.
# TODO: continue discussion with Muhammad and Jay about this.
# Add a component to the page so it will have unpublished changes.
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED)
unit.publish()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
def test_discard_changes(self):
"""
Scenario: The publish title changes after "Discard Changes" is clicked
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the Discard Changes button is disabled
And I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the Discard Changes button is enabled
And when I click the Discard Changes button
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.discard_changes()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_view_live_no_changes(self):
"""
Scenario: "View Live" shows published content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the View Live button is enabled
And when I click on the View Live button
Then I see the published content in LMS
"""
unit = self.go_to_unit_page()
self._view_published_version(unit)
self._verify_components_visible(['html'])
def test_view_live_changes(self):
"""
Scenario: "View Live" does not show draft content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click on the View Live button
Then I see the published content in LMS
And I do not see the unpublished component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
self._view_published_version(unit)
self._verify_components_visible(['html'])
self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0))
def test_view_live_after_publish(self):
"""
Scenario: "View Live" shows newly published content
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click the Publish button
And when I click on the View Live button
Then I see the newly published component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.publish()
self._view_published_version(unit)
self._verify_components_visible(['html', 'discussion'])
def test_initially_unlocked_visible_to_students(self):
"""
Scenario: An unlocked unit with release date in the past is visible to students
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
Then the unit has a warning that it is visible to students
And it is marked as "RELEASED" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"'
)
self._view_published_version(unit)
self._verify_student_view_visible(['problem'])
def test_locked_visible_to_staff_only(self):
"""
Scenario: After locking a unit with release date in the past, it is only visible to staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I select "Hide from students"
Then the unit does not have a warning that it is visible to students
And the unit does not display inherited staff lock
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
checked = unit.toggle_staff_lock()
self.assertTrue(checked)
self.assertFalse(unit.currently_visible_to_students)
self.assertFalse(unit.shows_inherited_staff_lock())
unit.verify_publish_title(self.LOCKED_STATUS)
self._view_published_version(unit)
# Will initially be in staff view, locked component should be visible.
self._verify_components_visible(['problem'])
# Switch to student view and verify not visible
self._verify_student_view_locked()
def test_initially_locked_not_visible_to_students(self):
"""
Scenario: A locked unit with release date in the past is not visible to students
Given I have a published locked unit with release date in the past
When I go to the unit page in Studio
Then the unit does not have a warning that it is visible to students
And it is marked as "RELEASE" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
unit.verify_publish_title(self.LOCKED_STATUS)
self.assertFalse(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASE,
self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"'
)
self._view_published_version(unit)
self._verify_student_view_locked()
def test_unlocked_visible_to_all(self):
"""
Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I deselect "Hide from students"
Then the unit does have a warning that it is visible to students
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
checked = unit.toggle_staff_lock()
self.assertFalse(checked)
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._view_published_version(unit)
# Will initially be in staff view, components always visible.
self._verify_components_visible(['discussion'])
# Switch to student view and verify visible.
self._verify_student_view_visible(['discussion'])
def test_explicit_lock_overrides_implicit_subsection_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
subsection = self.outline.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_explicit_lock_overrides_implicit_section_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
section = self.outline.section_at(0)
unit = section.subsection_at(0).unit_at(0)
section.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_cancel_does_not_create_draft(self):
"""
Scenario: Editing a component and then canceling does not create a draft version (TNL-399)
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of an HTML component and then press cancel
Then the content does not change
And the title in the Publish information box is "Published and Live"
And when I reload the page
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlXBlockEditorView(self.browser, component.locator).set_content_and_cancel("modified content")
self.assertEqual(component.student_content, "Body of HTML Unit.")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.browser.refresh()
unit.wait_for_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_delete_child_in_published_unit(self):
"""
Scenario: A published unit can be published again after deleting a child
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And delete the only component
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see an empty unit in LMS
"""
unit = self.go_to_unit_page()
unit.delete(0)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertEqual(0, self.courseware.num_xblock_components)
def test_published_not_live(self):
"""
Scenario: The publish title displays correctly for units that are not live
Given I have a published unit with no unpublished changes that releases in the future
When I go to the unit page in Studio
Then the title in the Publish information box is "Published (not yet released)"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published (not yet released)"
"""
unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit')
unit.verify_publish_title(self.PUBLISHED_STATUS)
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish()
unit.verify_publish_title(self.PUBLISHED_STATUS)
def _view_published_version(self, unit):
"""
Goes to the published version, then waits for the browser to load the page.
"""
unit.view_published_version()
self.assertEqual(len(self.browser.window_handles), 2)
self.courseware.wait_for_page()
def _verify_and_return_staff_page(self):
"""
Verifies that the browser is on the staff page and returns a StaffCoursewarePage.
"""
page = StaffCoursewarePage(self.browser, self.course_id)
page.wait_for_page()
return page
def _verify_student_view_locked(self):
"""
Verifies no component is visible when viewing as a student.
"""
page = self._verify_and_return_staff_page()
page.set_staff_view_mode('Learner')
page.wait_for(lambda: self.courseware.num_xblock_components == 0, 'No XBlocks visible')
def _verify_student_view_visible(self, expected_components):
"""
Verifies expected components are visible when viewing as a student.
"""
self._verify_and_return_staff_page().set_staff_view_mode('Learner')
self._verify_components_visible(expected_components)
def _verify_components_visible(self, expected_components):
"""
Verifies the expected components are visible (and there are no extras).
"""
self.assertEqual(len(expected_components), self.courseware.num_xblock_components)
for index, component in enumerate(expected_components):
self.assertEqual(component, self.courseware.xblock_component_type(index))
def _verify_release_date_info(self, unit, expected_title, expected_date):
"""
Verifies how the release date is displayed in the publishing sidebar.
"""
self.assertEqual(expected_title, unit.release_title)
self.assertEqual(expected_date, unit.release_date)
def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix):
"""
Verifies that last published and last saved messages respectively contain the given strings.
"""
self.assertIn(expected_published_prefix, unit.last_published_text)
self.assertIn(expected_saved_prefix, unit.last_saved_text)
def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page):
"""
Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked.
"""
self.assertTrue(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertFalse(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertTrue(unit_page.shows_inherited_staff_lock())
# TODO: need to work with Jay/Christine to get testing of "Preview" working.
# def test_preview(self):
# unit = self.go_to_unit_page()
# add_discussion(unit)
# unit.preview()
# self.assertEqual(2, self.courseware.num_xblock_components)
# self.assertEqual('html', self.courseware.xblock_component_type(0))
# self.assertEqual('discussion', self.courseware.xblock_component_type(1))
@attr(shard=3)
class DisplayNameTest(ContainerBase):
"""
Test consistent use of display_name_with_default
"""
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', None)
)
)
)
)
def test_display_name_default(self):
"""
Scenario: Given that an XBlock with a dynamic display name has been added to the course,
When I view the unit page and note the display name of the block,
Then I see the dynamically generated display name,
And when I then go to the container page for that same block,
Then I see the same generated display name.
"""
# Unfortunately no blocks in the core platform implement display_name_with_default
# in an interesting way for this test, so we are just testing for consistency and not
# the actual value.
unit = self.go_to_unit_page()
test_block = unit.xblocks[1]
title_on_unit_page = test_block.name
container = test_block.go_to_container()
self.assertEqual(container.name, title_on_unit_page)
@attr(shard=3)
class ProblemCategoryTabsTest(ContainerBase):
"""
Test to verify tabs in problem category.
"""
def setUp(self, is_staff=True):
super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Sets up course structure.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_correct_tabs_present(self):
"""
Scenario: Verify that correct tabs are present in problem category.
Given I am a staff user
When I go to unit page
Then I only see `Common Problem Types` and `Advanced` tabs in `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced'])
def test_common_problem_types_tab(self):
"""
Scenario: Verify that correct components are present in Common Problem Types tab.
Given I am a staff user
When I go to unit page
Then I see correct components under `Common Problem Types` tab in `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
expected_components = [
"Blank Common Problem",
"Checkboxes",
"Dropdown",
"Multiple Choice",
"Numerical Input",
"Text Input",
"Checkboxes with Hints and Feedback",
"Dropdown with Hints and Feedback",
"Multiple Choice with Hints and Feedback",
"Numerical Input with Hints and Feedback",
"Text Input with Hints and Feedback",
]
self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
@attr(shard=16)
@ddt.ddt
class MoveComponentTest(ContainerBase):
"""
Tests of moving an XBlock to another XBlock.
"""
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
def setUp(self, is_staff=True):
super(MoveComponentTest, self).setUp(is_staff=is_staff)
self.container = ContainerPage(self.browser, None)
self.move_modal_view = MoveModalView(self.browser)
self.navigation_options = {
'section': 0,
'subsection': 0,
'unit': 1,
}
self.source_component_display_name = 'HTML 11'
self.source_xblock_category = 'component'
self.message_move = u'Success! "{display_name}" has been moved.'
self.message_undo = u'Move cancelled. "{display_name}" has been moved back to its original location.'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure.
"""
# pylint: disable=attribute-defined-outside-init
self.unit_page1 = XBlockFixtureDesc('vertical', 'Test Unit 1').add_children(
XBlockFixtureDesc('html', 'HTML 11'),
XBlockFixtureDesc('html', 'HTML 12')
)
self.unit_page2 = XBlockFixtureDesc('vertical', 'Test Unit 2').add_children(
XBlockFixtureDesc('html', 'HTML 21'),
XBlockFixtureDesc('html', 'HTML 22')
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
self.unit_page1,
self.unit_page2
)
)
)
    def verify_move_operations(self, unit_page, source_component, operation, component_display_names_after_operation,
                               should_verify_publish_title=True):
        """
        Verify move operations.
        Arguments:
            unit_page (Object) Unit container page.
            source_component (Object) Source XBlock object to be moved.
            operation (str) 'move' or 'undo_move' operation.
            component_display_names_after_operation (list) Display names of components after the operation in source/dest.
            should_verify_publish_title (Boolean) Whether to verify the publish title or not. Default is True.
        """
source_component.open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
if should_verify_publish_title:
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
# Verify unit in draft state now
if should_verify_publish_title:
self.container.verify_publish_title(self.DRAFT_STATUS)
if operation == 'move':
self.container.click_take_me_there_link()
elif operation == 'undo_move':
self.container.click_undo_move_link()
self.container.verify_confirmation_message(
self.message_undo.format(display_name=self.source_component_display_name)
)
unit_page = ContainerPage(self.browser, None)
components = unit_page.displayed_children
self.assertEqual(
[component.name for component in components],
component_display_names_after_operation
)
def verify_state_change(self, unit_page, operation):
"""
        Verify that after a state change, the confirmation message is hidden.
        Arguments:
            unit_page (Object) Unit container page.
            operation (str) 'publish' or 'discard' changes operation.
"""
# Verify unit in draft state now
self.container.verify_publish_title(self.DRAFT_STATUS)
# Now click publish/discard button
if operation == 'publish':
unit_page.publish()
else:
unit_page.discard_changes()
# Now verify success message is hidden
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.container.verify_confirmation_message(
message=self.message_move.format(display_name=self.source_component_display_name),
verify_hidden=True
)
def test_move_component_successfully(self):
"""
Test if we can move a component successfully.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
And I navigate to unit in second section
        And I see the move button is enabled
        When I click on the move button
        Then I see the move operation success message
        And when I click on the 'take me there' link
        Then I see the moved component there.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
        self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 11']
)
def test_undo_move_component_successfully(self):
"""
Test if we can undo move a component successfully.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
When I click on the move button
        Then I see the move operation success message
        And when I click on the undo move link
        Then I see that the undo move operation is successful
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
        self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 11', 'HTML 12']
)
@ddt.data('publish', 'discard')
    def test_publish_discard_changes_after_move(self, operation):
"""
Test if success banner is hidden when we discard changes or publish the unit after a move operation.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
And I navigate to unit in second section
        And I see the move button is enabled
        When I click on the move button
        Then I see the move operation success message
        And when I click on the publish or discard changes button
        Then I see the move operation success message is hidden.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
components[0].open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
self.verify_state_change(unit_page, operation)
def test_content_experiment(self):
"""
Test if we can move a component of content experiment successfully.
Given that I am a staff user
And I go to content experiment page
        And I open the move modal
When I navigate to the unit in second section
        Then I see the move button is enabled
        And when I click on the move button
        Then I see the move operation success message
        And when I click on the 'take me there' link
        Then I see the moved component there
        And when I undo the move of a component
        Then I see the undo move operation success message
"""
# Add content experiment support to course.
self.course_fixture.add_advanced_settings({
u'advanced_modules': {'value': ['split_test']},
})
# Create group configurations
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
'metadata': {
u'user_partitions': [
create_user_partition_json(
0,
'Test Group Configuration',
'Description of the group configuration.',
[Group('0', 'Group A'), Group('1', 'Group B')]
),
],
},
})
# Add split test to unit_page1 and assign newly created group configuration to it
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(self.unit_page1.locator, split_test)
# Visit content experiment container page.
unit_page = ContainerPage(self.browser, split_test.locator)
unit_page.visit()
group_a_locator = unit_page.displayed_children[0].locator
# Add some components to Group A.
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 311')
)
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 312')
)
        # Go to the group page to move its component.
group_container_page = ContainerPage(self.browser, group_a_locator)
group_container_page.visit()
# Verify content experiment block has correct groups and components.
components = group_container_page.displayed_children
self.assertEqual(len(components), 2)
self.source_component_display_name = 'HTML 311'
# Verify undo move operation for content experiment.
        self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 311', 'HTML 312'],
should_verify_publish_title=False
)
# Verify move operation for content experiment.
        self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 311'],
should_verify_publish_title=False
)
    # Ideally this test should be decorated with @attr('a11y') so that it runs in the a11y jenkins job,
    # but for some reason it always fails there, while it always passes locally on devstack as well
    # as in the bokchoy jenkins job. For this reason, the test is marked to run under the bokchoy jenkins job.
def test_a11y(self):
"""
Verify move modal a11y.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
unit_page.a11y_audit.config.set_scope(
include=[".modal-window.move-modal"]
)
unit_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # TODO: AC-716
'link-href', # TODO: AC-716
]
})
unit_page.displayed_children[0].open_move_modal()
for category in ['section', 'subsection', 'component']:
self.move_modal_view.navigate_to_category(category, self.navigation_options)
unit_page.a11y_audit.check_for_accessibility_errors()
|
jolyonb/edx-platform
|
common/test/acceptance/tests/studio/test_studio_container.py
|
Python
|
agpl-3.0
| 68,525
|
[
"VisIt"
] |
a9789ceb62decaf4ad41acc79cd2fc29adc5be3bd922c1929421763a52a53fea
|
from django.contrib.auth import get_user_model
from core.tests.base import BaseTestCase
class UserModelTestCase(BaseTestCase):
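    """
    Tests of user-model helpers: username/email existence checks and
    monthly/whole-diary word statistics. Assumes BaseTestCase creates
    self.user along with self.test_username and self.test_email.
    """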
def setUp(self):
super(UserModelTestCase, self).setUp()
# Create sample diaries
self.user.diary_set.create(
datetime="2016/07/02",
content="Today it was very hot!",
)
self.user.diary_set.create(
datetime="2016/07/15",
content="I went to the concert. And I had dinner with my friends",
)
self.user.diary_set.create(
datetime="2016/08/11",
content="I watched a action movie with my family. It was so exciting.",
)
self.user.diary_set.create(
datetime="2016/08/29",
content="I was so tired today. I don`t want to do no more",
)
def test_check_username_in_user_model(self):
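        """
        check_username should return True for an existing username and False otherwise.
        """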
test_wrong_username = "test_wrong_username"
self.assertTrue(
get_user_model().objects.check_username(self.test_username),
)
self.assertFalse(
get_user_model().objects.check_username(test_wrong_username),
)
def test_check_email_in_user_model(self):
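        """
        check_email should return True for an existing email address and False otherwise.
        """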
test_wrong_email = "wrong@example.com"
self.assertTrue(
get_user_model().objects.check_email(self.test_email),
)
self.assertFalse(
get_user_model().objects.check_email(test_wrong_email),
)
def test_user_has_correct_monthly_words(self):
test_july_words = [
"today",
"it",
"was",
"very",
"hot",
"i",
"went",
"to",
"the",
"concert",
"and",
"had",
"dinner",
"with",
"my",
"friends",
]
test_august_words = [
"i",
"watched",
"a",
"action",
"movie",
"with",
"my",
"family",
"it",
"was",
"so",
"exciting",
"i",
"was",
"so",
"tired",
"today",
"don`t",
"want",
"to",
"do",
"no",
"more",
]
self.assertEqual(
sorted(test_july_words),
sorted(
self.user.monthly_words(
year="2016",
month="07",
),
),
)
self.assertEqual(
sorted(test_august_words),
sorted(
self.user.monthly_words(
year="2016",
month="08",
),
),
)
def test_user_has_correct_distinct_monthly_words_count(self):
self.assertEqual(
16,
self.user.distinct_monthly_words_count(
year="2016",
month="07",
),
)
self.assertEqual(
20,
self.user.distinct_monthly_words_count(
year="2016",
month="08",
),
)
def test_user_has_correct_whole_used_words(self):
test_whole_used_words = [
"today",
"it",
"was",
"very",
"hot",
"i",
"went",
"to",
"the",
"concert",
"and",
"had",
"dinner",
"with",
"my",
"friends",
"i",
"watched",
"a",
"action",
"movie",
"with",
"my",
"family",
"it",
"was",
"so",
"exciting",
"i",
"was",
"so",
"tired",
"today",
"don`t",
"want",
"to",
"do",
"no",
"more",
]
self.assertEqual(
sorted(test_whole_used_words),
sorted(self.user.whole_used_words),
)
def test_user_has_correct_distinct_whole_used_words_count(self):
self.assertEqual(
29,
self.user.distinct_whole_used_words_count,
)
|
jupiny/EnglishDiary
|
english_diary/users/tests/test_models.py
|
Python
|
mit
| 4,425
|
[
"exciting"
] |
86a9e5f2dffdaa6d3533eba976880aa0667ccd024db547e6b75d39f271a73e45
|
#
# A file that opens the neuroConstruct project LarkumEtAl2009 and runs the cell model LarkumPyr (used by the teststim.py script)
#
# Author: Matteo Farinella
from sys import *
from java.io import File
from java.lang import System
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.gui import SimulationRerunFrame
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from ucl.physiol.neuroconstruct.simulation import SpikeAnalyser
from math import *
import random
import time
import shutil
import os
import subprocess
print "============================"
print "run neuroConstruct model"
print "============================"
projName = "LarkumEtAl2009"
projFile = File("/home/matteo/neuroConstruct/models/"+projName+"/"+projName+".ncx")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
myProject.neuronSettings.setNoConsole() # Calling this means no console/terminal is opened when each simulation is run
simConfig = myProject.simConfigInfo.getSimConfig("test_IClamp")# configuration aimed to reproduce the IClamp from modelDB
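# Pick a random seed so each run regenerates the network afresh.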
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
numGenerated = myProject.generatedCellPositions.getNumberInAllCellGroups()
simRef = "testsim"
myProject.simulationParameters.setReference(simRef)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
print "allow 3 mins for simulation to finish..."
time.sleep(180)
# get values from NEURON-vector format into Python format
times_nC = [] # Use list to add another trace later.
linestring = open('models/LarkumEtAl2009/simulations/testsim/time.dat', 'r').read()
times_nC = map(float, linestring.split())
voltages_nC = []
linestring = open('models/LarkumEtAl2009/simulations/testsim/pyr_group_0.dat', 'r').read()
voltages_nC = map(float, linestring.split())
print "================================="
print "load original modelDB spike times"
print "================================="
print "opening file .../ModelDB/IClamp_somaV.txt"
# the file IClamp_somaV.txt contains the somatic recording from the original ModelDB model
# run with a time step of 0.025 ms, Ra = 85 Mohm, eK = -87mV (as in the paper)
# get values from NEURON-vector format into Python format
times_original = []
voltages_original = []
for line in open ('models/LarkumEtAl2009/ModelDB/IClamp_somaV.txt', 'rt'):
t, v = [float (x) for x in line.split()]
times_original.append (t)
voltages_original.append (v)
print "============================"
print "plot and compare spike times"
print "============================"
analyseStartTime = 0 # ms
analyseStopTime = 600
analyseThreshold = -20 # mV
spikeTimes_nC = SpikeAnalyser.getSpikeTimes(voltages_nC, times_nC, analyseThreshold, analyseStartTime, analyseStopTime)
print "neuroConcsturct spike times:"
print spikeTimes_nC
spikeTimes_original = SpikeAnalyser.getSpikeTimes(voltages_original, times_original, analyseThreshold, analyseStartTime, analyseStopTime)
print "modelDB spike times:"
print spikeTimes_original
plotFrame = PlotManager.getPlotterFrame("test model: "+str(myProject.getProjectFile()) , 1, 1)
plotFrame.setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = "modelDB vs nC: "+str(simConfig)
dataSet_nC = DataSet(info, info, "ms", "nC", "time", "nC")
dataSet_nC.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
for t1 in spikeTimes_nC:
dataSet_nC.addPoint(t1,1)
dataSet_original = DataSet(info, info, "ms", "modelDB", "time", "modelDB")
dataSet_original.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
for t2 in spikeTimes_original:
dataSet_original.addPoint(t2,1)
plotFrame.addDataSet(dataSet_nC)
plotFrame.addDataSet(dataSet_original)
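# The test passes only if both traces contain the same number of spikes and
# every pair of corresponding spike times agrees to within 1 ms.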
test = 0
if len(spikeTimes_nC) == len(spikeTimes_original):
test = 1
for x in range(len(spikeTimes_nC)):
if (abs(spikeTimes_nC[x] - spikeTimes_original[x]) > 1): # 1 ms precision
test = 0
if test == 0 :
print "**************** TEST FAILED ****************"
else:
print "================ TEST PASSED ================"
|
pgleeson/TestArea
|
models/LarkumEtAl2009/testmodel.py
|
Python
|
gpl-2.0
| 5,240
|
[
"NEURON"
] |
e7354600c32e979caab4725124adf61a54737f698b0c3f6b080cf39d41072e27
|
# -*- coding: utf-8 -*-
"""
Display cryptocurrency data.
The site we retrieve cryptocurrency data from offers various types of data, such
as name, symbol, price, volume, percentage change, and total supply for a wide
range of cryptocurrencies, and prices can be obtained in currencies other than
USD. For more information, visit https://coinmarketcap.com
Configuration parameters:
cache_timeout: refresh interval for this module. A message from the site:
Please limit requests to no more than 10 per minute. (default 600)
format: display format for this module (default '{format_coin}')
format_coin: display format for coins
(default '{name} ${price_usd:.2f} [\?color=24h {percent_change_24h}%]')
format_separator: show separator if more than one (default ' ')
markets: number of top-ranked markets or list of user-inputted markets
(default ['btc'])
request_timeout: time to wait for a response, in seconds (default 5)
thresholds: for percentage changes (default [(-100, 'bad'), (0, 'good')])
Format placeholder:
{format_coin} format for cryptocurrency coins
format_coin placeholders:
{24h_volume_usd} eg 1435150000.0
{available_supply} eg 16404825.0
{id} eg bitcoin
{last_updated} eg 1498135152
{market_cap_usd} eg 44119956596.0
{name} eg Bitcoin
{percent_change_1h} eg -0.17
{percent_change_24h} eg -1.93
{percent_change_7d} eg +14.73
{price_btc} eg 1.0
{price_usd} eg 2689.45
{rank} eg 1
{symbol} eg BTC
{total_supply} eg 16404825.0
Placeholders are retrieved directly from the URL.
The list was harvested only once and should not be taken as a complete list.
To print coins in a different currency, replace or replicate the placeholders
below with a valid option (eg '{price_gbp}') to create additional placeholders:
{price_xxx} eg (new output here)
{24h_volume_xxx} eg (new output here)
{market_cap_xxx} eg (new output here)
Valid options are: AUD, BRL, CAD, CHF, CNY, EUR, GBP, HKD, IDR, INR,
JPY, KRW, MXN, RUB, otherwise USD; options must be written in lowercase.
Color thresholds:
1h: print color based on the value of percent_change_1h
24h: print color based on the value of percent_change_24h
7d: print color based on the value of percent_change_7d
Example:
```
# view coins in GBP and optionally USD
coin_market {
format_coin = '{name} £{price_gbp:.2f} ${price_usd:.2f} {percent_change_24h}'
}
```
@author lasers, x86kernel
SAMPLE OUTPUT
[
{'color': '#FFFFFF', 'full_text': 'Bitcoin $2735.77 '},
{'color': '#00FF00', 'full_text': '+2.27%'},
]
losers
[
{'color': '#FFFFFF', 'full_text': 'Bitcoin $2701.70 '},
{'color': '#FF0000', 'full_text': '-0.42%'},
]
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 600
format = '{format_coin}'
format_coin = '{name} ${price_usd:.2f} [\?color=24h {percent_change_24h}%]'
format_separator = ' '
markets = ['btc']
request_timeout = 5
thresholds = [(-100, 'bad'), (0, 'good')]
def post_config_hook(self):
self.first_run = self.first_use = True
self.convert = self.limit = None
self.url = self.reset_url = 'https://api.coinmarketcap.com/v1/ticker/'
# find out if we want top-ranked markets or user-inputted markets
if isinstance(self.markets, int):
self.limit = self.markets
else:
self.markets = [x.upper().strip() for x in self.markets]
# create '?convert'
for item in self.py3.get_placeholders_list(self.format_coin):
if (('price' in item and 'price_btc' not in item) or
'24h_volume' in item or 'market_cap' in item) \
and 'usd' not in item:
self.convert = '?convert=%s' % (item.split('_')[-1])
self.url = self.reset_url = self.reset_url + self.convert
break
# create '(?|&)limit'
if self.limit:
self._update_limit(None)
def _get_coin_data(self, reset=False):
if reset:
self.url = self.reset_url
try:
data = self.py3.request(self.url, timeout=self.request_timeout).json()
except self.py3.RequestException:
data = {}
return data
def _update_limit(self, data):
# we use limit if it exists. otherwise, we stretch the limit
# large enough to obtain (all) self.markets + some padding
self.url = self.url + ('&' if self.convert else '?')
if self.limit:
limit = self.limit
else:
limit = 0
for market_id in self.markets:
index = next((i for (i, d) in enumerate(
data) if d['symbol'] == market_id), -1)
if index >= limit:
limit = index
limit += 5 # padding
self.url += 'limit=%s' % limit
def _strip_data(self, data):
# if self.limit, we don't strip. otherwise, we strip 1000+ coins
# down to %s coins by removing everything not in self.markets.
new_data = []
if self.limit:
new_data = data
else:
for symbol in self.markets:
for market in data:
if symbol == market['symbol']:
new_data.append(market)
break
return new_data
def _organize_data(self, data):
# compare len(stripped 1000+ coins) with len(self.markets)
new_data = self._strip_data(data)
is_equal = len(new_data) == len(self.markets)
# first_use bad? the user entered bad markets. stop here (error).
# otherwise, make a limit for first time on 1000+ coins.
if self.first_use:
self.first_use = False
if not is_equal:
self.py3.error('bad markets')
else:
self._update_limit(data)
elif not is_equal:
# post first_use bad? the markets fell out of the limit + padding.
# reset the url to get 1000+ coins again so we can strip, compare,
# make new limit + padding for next loop, but we'll use that new
# data. otherwise, we would keep going with that first new_data.
new_data = self._get_coin_data(reset=True)
new_data = self._strip_data(new_data)
self._update_limit(new_data)
return new_data
def _manipulate_data(self, data):
# we mess with raw data to get the new results. we fix up percent_change
# with color thresholds and prefix all positive values with a plus.
new_data = []
for market in data:
temporary = {}
for k, v in market.items():
if 'percent_change_' in k and v:
temporary[k] = '+%s' % v if float(v) > 0 else v
# remove 'percent_change_' for thresholds: 1h, 24h, or 7d
self.py3.threshold_get_color(v, k[15:])
else:
temporary[k] = v
new_data.append(self.py3.safe_format(self.format_coin, temporary))
return new_data
def coin_market(self):
data = []
if self.first_run:
self.first_run = False
cached_until = 0
else:
# first 1000+ coins (then %s coins)
cached_until = self.cache_timeout
coin_data = self._get_coin_data()
if not self.limit:
# strip, compare, and maybe update again
coin_data = self._organize_data(coin_data)
data = self._manipulate_data(coin_data) # paint coin colors
format_separator = self.py3.safe_format(self.format_separator)
format_coin = self.py3.composite_join(format_separator, data)
return {
'cached_until': self.py3.time_in(cached_until),
'full_text': self.py3.safe_format(self.format, {'format_coin': format_coin})
}
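# Editor's sketch (hypothetical values, not part of the module): the
# plus-prefixing in _manipulate_data() above turns a raw API value such as
#     v = '2.27'   into   '+%s' % v if float(v) > 0 else v   # -> '+2.27'
# while '-0.42' is left untouched; the threshold key is the suffix after
# 'percent_change_' (here '24h'), which drives the color thresholds.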
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
alexoneill/py3status
|
py3status/modules/coin_market.py
|
Python
|
bsd-3-clause
| 8,379
|
[
"VisIt"
] |
f6bc20ca9909827277c72230d07a4fcfcaeb7f9eb8eae0052f80e0ea6ecd7cc9
|
"""This module implements an scanerless Earley parser.
The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
https://www.sciencedirect.com/science/article/pii/S1571066108001497
That is probably the best reference for understanding the algorithm here.
The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
is better documented here:
http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
"""
import logging
from collections import deque
from ..visitors import Transformer_InPlace, v_args
from ..exceptions import UnexpectedEOF, UnexpectedToken
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestToTreeVisitor, ForestSumVisitor, SymbolNode, ForestToAmbiguousTreeVisitor
class Parser:
def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=False):
analysis = GrammarAnalyzer(parser_conf)
self.parser_conf = parser_conf
self.resolve_ambiguity = resolve_ambiguity
self.debug = debug
self.FIRST = analysis.FIRST
self.NULLABLE = analysis.NULLABLE
self.callbacks = parser_conf.callbacks
self.predictions = {}
## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than
# the slow 'isupper' in is_terminal.
self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term }
self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term }
self.forest_sum_visitor = None
for rule in parser_conf.rules:
if rule.origin not in self.predictions:
self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)]
## Detect if any rules have priorities set. If the user specified priority = "none" then
# the priorities will be stripped from all rules before they reach us, allowing us to
# skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
# on any rules.
if self.forest_sum_visitor is None and rule.options.priority is not None:
self.forest_sum_visitor = ForestSumVisitor
self.term_matcher = term_matcher
def predict_and_complete(self, i, to_scan, columns, transitives):
"""The core Earley Predictor and Completer.
At each stage of the input, we handle any completed items (things
that matched on the last cycle) and use those to predict what should
come next in the input stream. The completions and any predicted
non-terminals are recursively processed until we reach a set of
terminals, which can be added to the scan list for the next scanner cycle."""
# Held Completions (H in E. Scott's paper).
node_cache = {}
held_completions = {}
column = columns[i]
# R (items) = Ei (column.items)
items = deque(column)
while items:
item = items.pop() # remove an element, A say, from R
### The Earley completer
if item.is_complete: ### (item.s == string)
if item.node is None:
label = (item.s, item.start, i)
item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
item.node.add_family(item.s, item.rule, item.start, None, None)
# create_leo_transitives(item.rule.origin, item.start)
###R Joop Leo right recursion Completer
if item.rule.origin in transitives[item.start]:
transitive = transitives[item.start][item.s]
if transitive.previous in transitives[transitive.column]:
root_transitive = transitives[transitive.column][transitive.previous]
else:
root_transitive = transitive
new_item = Item(transitive.rule, transitive.ptr, transitive.start)
label = (root_transitive.s, root_transitive.start, i)
new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
new_item.node.add_path(root_transitive, item.node)
if new_item.expect in self.TERMINALS:
# Add (B :: aC.B, h, y) to Q
to_scan.add(new_item)
elif new_item not in column:
# Add (B :: aC.B, h, y) to Ei and R
column.add(new_item)
items.append(new_item)
###R Regular Earley completer
else:
# Empty has 0 length. If we complete an empty symbol in a particular
# parse step, we need to be able to use that same empty symbol to complete
# any predictions that result, that themselves require empty. Avoids
# infinite recursion on empty symbols.
# held_completions is 'H' in E. Scott's paper.
is_empty_item = item.start == i
if is_empty_item:
held_completions[item.rule.origin] = item.node
originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s]
for originator in originators:
new_item = originator.advance()
label = (new_item.s, originator.start, i)
new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node)
if new_item.expect in self.TERMINALS:
# Add (B :: aC.B, h, y) to Q
to_scan.add(new_item)
elif new_item not in column:
# Add (B :: aC.B, h, y) to Ei and R
column.add(new_item)
items.append(new_item)
### The Earley predictor
elif item.expect in self.NON_TERMINALS: ### (item.s == lr0)
new_items = []
for rule in self.predictions[item.expect]:
new_item = Item(rule, 0, i)
new_items.append(new_item)
# Process any held completions (H).
if item.expect in held_completions:
new_item = item.advance()
label = (new_item.s, item.start, i)
new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect])
new_items.append(new_item)
for new_item in new_items:
if new_item.expect in self.TERMINALS:
to_scan.add(new_item)
elif new_item not in column:
column.add(new_item)
items.append(new_item)
def _parse(self, stream, columns, to_scan, start_symbol=None):
def is_quasi_complete(item):
if item.is_complete:
return True
quasi = item.advance()
while not quasi.is_complete:
if quasi.expect not in self.NULLABLE:
return False
if quasi.rule.origin == start_symbol and quasi.expect == start_symbol:
return False
quasi = quasi.advance()
return True
def create_leo_transitives(origin, start):
visited = set()
to_create = []
trule = None
previous = None
### Recursively walk backwards through the Earley sets until we find the
# first transitive candidate. If this is done continuously, we shouldn't
# have to walk more than 1 hop.
while True:
if origin in transitives[start]:
previous = trule = transitives[start][origin]
break
is_empty_rule = not self.FIRST[origin]
if is_empty_rule:
break
candidates = [ candidate for candidate in columns[start] if candidate.expect is not None and origin == candidate.expect ]
if len(candidates) != 1:
break
originator = next(iter(candidates))
if originator is None or originator in visited:
break
visited.add(originator)
if not is_quasi_complete(originator):
break
trule = originator.advance()
if originator.start != start:
visited.clear()
to_create.append((origin, start, originator))
origin = originator.rule.origin
start = originator.start
# If a suitable Transitive candidate is not found, bail.
if trule is None:
return
#### Now walk forwards and create Transitive Items in each set we walked through; and link
# each transitive item to the next set forwards.
while to_create:
origin, start, originator = to_create.pop()
titem = None
if previous is not None:
titem = previous.next_titem = TransitiveItem(origin, trule, originator, previous.column)
else:
titem = TransitiveItem(origin, trule, originator, start)
previous = transitives[start][origin] = titem
def scan(i, token, to_scan):
"""The core Earley Scanner.
This is a custom implementation of the scanner that uses the
Lark lexer to match tokens. The scan list is built by the
Earley predictor, based on the previously completed tokens.
This ensures that at each phase of the parse we have a custom
lexer context, allowing for more complex ambiguities."""
next_to_scan = set()
next_set = set()
columns.append(next_set)
transitives.append({})
node_cache = {}
for item in set(to_scan):
if match(item.expect, token):
new_item = item.advance()
label = (new_item.s, new_item.start, i)
new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)
if new_item.expect in self.TERMINALS:
# add (B ::= Aai+1.B, h, y) to Q'
next_to_scan.add(new_item)
else:
# add (B ::= Aa+1.B, h, y) to Ei+1
next_set.add(new_item)
if not next_set and not next_to_scan:
expect = {i.expect.name for i in to_scan}
raise UnexpectedToken(token, expect, considered_rules = set(to_scan))
return next_to_scan
# Define parser functions
match = self.term_matcher
# Cache for nodes & tokens created in a particular parse step.
transitives = [{}]
## The main Earley loop.
# Run the Prediction/Completion cycle for any Items in the current Earley set.
# Completions will be added to the SPPF tree, and predictions will be recursively
# processed down to terminals/empty nodes to be added to the scanner for the next
# step.
i = 0
for token in stream:
self.predict_and_complete(i, to_scan, columns, transitives)
to_scan = scan(i, token, to_scan)
i += 1
self.predict_and_complete(i, to_scan, columns, transitives)
## Column is now the final column in the parse.
assert i == len(columns)-1
return to_scan
def parse(self, stream, start):
assert start, start
start_symbol = NonTerminal(start)
columns = [set()]
to_scan = set() # The scan buffer. 'Q' in E.Scott's paper.
## Predict for the start_symbol.
# Add predicted items to the first Earley set (for the predictor) if they
# result in a non-terminal, or the scanner if they result in a terminal.
for rule in self.predictions[start_symbol]:
item = Item(rule, 0, 0)
if item.expect in self.TERMINALS:
to_scan.add(item)
else:
columns[0].add(item)
to_scan = self._parse(stream, columns, to_scan, start_symbol)
# If the parse was successful, the start
# symbol should have been completed in the last step of the Earley cycle, and will be in
# this column. Find the item for the start_symbol, which is the root of the SPPF tree.
solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]
if self.debug:
from .earley_forest import ForestToPyDotVisitor
try:
debug_walker = ForestToPyDotVisitor()
except ImportError:
logging.warning("Cannot find dependency 'pydot', will not generate sppf debug image")
else:
debug_walker.visit(solutions[0], "sppf.png")
if not solutions:
expected_tokens = [t.expect for t in to_scan]
raise UnexpectedEOF(expected_tokens)
elif len(solutions) > 1:
assert False, 'Earley should not generate multiple start symbol items!'
# Perform our SPPF -> AST conversion using the right ForestVisitor.
forest_tree_visitor_cls = ForestToTreeVisitor if self.resolve_ambiguity else ForestToAmbiguousTreeVisitor
forest_tree_visitor = forest_tree_visitor_cls(self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor())
return forest_tree_visitor.visit(solutions[0])
class ApplyCallbacks(Transformer_InPlace):
def __init__(self, postprocess):
self.postprocess = postprocess
@v_args(meta=True)
def drv(self, children, meta):
return self.postprocess[meta.rule](children)
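# Editor's note -- a minimal, self-contained sketch (assumed names, not part
# of lark) of the node-cache idiom used throughout this parser: SPPF nodes
# are deduplicated by their (symbol, start, end) label so that every
# derivation spanning the same input range shares a single node.
if __name__ == '__main__':
    node_cache = {}
    def get_node(label):
        # setdefault returns the cached node, or stores and returns a new one
        return node_cache.setdefault(label, ('SymbolNode',) + label)
    a = get_node(('expr', 0, 3))
    b = get_node(('expr', 0, 3))
    assert a is b  # ambiguous derivations reuse the same SPPF node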
|
python-poetry/poetry-core
|
src/poetry/core/_vendor/lark/parsers/earley.py
|
Python
|
mit
| 14,853
|
[
"VisIt"
] |
0ee49890c1373beb491f14a7cdd88b4ea988880a458ddd8888efa0d9aadc1453
|
from pymol import cmd
#from glob import glob
#import build_seq
#import seq_convert
#files = glob.glob("/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/Scripts/sequences/*")
#for file in glob("sekwencje/*"):
#file="AA"
#runbuildseq='run /home/wiktor/Zabawki/pymol_scripts/lib/build_seq.py'
#cmd.do(runbuildseq)
#cmd.do('run /home/wiktor/Zabawki/pymol_scripts/lib/build_seq.py')
#for aa in "DCAHWLGELVWCT": cmd._alt(string.lower(aa))
residues = ('A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y')
for residue in residues:
for i in range(3):
cmd._alt(residue.lower())
# cmd.set_geometry ??
#cmd.save(aa.pdb, "all")
#run /home/wiktor/Zabawki/pymol_scripts/lib/build_seq.py
#cmd.do("run seq_convert.py")
#builds='build_seq'+'AA'+',ss=helix'
#cmd.do(builds)
#pdb='AA.pdb'+','+'ala'
#cmd.save(pdb)
#cmd.delete("all")
# cmd.load(file,'prot')
# for a in cmd.index("elem s and bound_to elem s"):
# if cmd.select("s1","%s`%d"%a) and \
# cmd.select("s2","elem s and bound_to %s`%d"%a):
# if cmd.select("(s1|s2) and not ?skip"):
# cmd.iterate("s1|s2","print ' ',chain,resn,resi,name")
# print ' ',round(cmd.dist("tmp","s1","s2"),3)
# cmd.select("skip","s1|s2|?skip")
#cd Scripts
#run build_seq_phi_psi.py
#run build_seq.py
#from glob import glob
#cd phi-psi-sequences
#for a in glob("*"): cmd.do("build_seq"+' '+a), cmd.save("/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/Scripts/structures/"+a+".pdb"), cmd.delete("all")
#from pymol import cmd,stored
#import build_seq
#import seq_convert
#import sys, os
#path=('/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/Scripts/tmprandom/')
#dirlst = listdir(path)
#for afile in dirlst:
# fh = open(path+afile,'r')
# for aline in fh.readlines():
# if not aline startswith('>'):
# build_seq(aline,None,None,None)
# cmd.save('/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/'+ afile.pdb,(afile))
#cmd.do("run build_seq.py")
#fileopen=('/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/Scripts/tmprandom/randomA')
#for sequence in fileopen.readline():
# for aa in sequence: cmd._alt(string.lower(aa))
#cmd.save('/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/Scripts/tmprandom/'+sequence+'.pdb',(selection))
# cmd build_seq(aline)
# cmd.save('/afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/'+afile.pdb, )
#for aa in "IYRSEH": cmd._alt(string.lower(aa))
#save /afs/pdc.kth.se/home/s/syazdi/Disc2/PROJECT/ile.pdb,(ile)
|
wjurkowski/tmdrug
|
peptide_builder/pymol_create_sequence.py
|
Python
|
apache-2.0
| 2,508
|
[
"PyMOL"
] |
3e2e05eca907844e932b354ee884b5c0a9ca7ef9d0964a9a6b4b9696094a84b1
|
#!/usr/bin/env python
import os
import sys
import string
import argparse
import subprocess
import tempfile
parser = argparse.ArgumentParser(description='Android system files extractor')
parser.add_argument("-p", "--prefix", metavar="NAME", required=True,
help="Prefix for stored files, e.g. galaxy-s7-us")
# System files which need to be read with `adb shell cat filename`
# instead of `adb pull filename`
SHELL_PREFIX = [
"/sys/class/kgsl/kgsl-3d0/",
]
SYSTEM_FILES = [
"/proc/cpuinfo",
"/system/build.prop",
"/sys/class/kgsl/kgsl-3d0/bus_split",
"/sys/class/kgsl/kgsl-3d0/clock_mhz",
"/sys/class/kgsl/kgsl-3d0/deep_nap_timer",
"/sys/class/kgsl/kgsl-3d0/default_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/dev",
"/sys/class/kgsl/kgsl-3d0/devfreq/available_frequencies",
"/sys/class/kgsl/kgsl-3d0/devfreq/available_governors",
"/sys/class/kgsl/kgsl-3d0/devfreq/cur_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/governor",
"/sys/class/kgsl/kgsl-3d0/devfreq/gpu_load",
"/sys/class/kgsl/kgsl-3d0/devfreq/max_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/min_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/polling_interval",
"/sys/class/kgsl/kgsl-3d0/devfreq/suspend_time",
"/sys/class/kgsl/kgsl-3d0/devfreq/target_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/trans_stat",
"/sys/class/kgsl/kgsl-3d0/device/op_cpu_table",
"/sys/class/kgsl/kgsl-3d0/freq_table_mhz",
"/sys/class/kgsl/kgsl-3d0/ft_fast_hang_detect",
"/sys/class/kgsl/kgsl-3d0/ft_hang_intr_status",
"/sys/class/kgsl/kgsl-3d0/ft_long_ib_detect",
"/sys/class/kgsl/kgsl-3d0/ft_pagefault_policy",
"/sys/class/kgsl/kgsl-3d0/ft_policy",
"/sys/class/kgsl/kgsl-3d0/gpu_available_frequencies",
"/sys/class/kgsl/kgsl-3d0/gpu_busy_percentage",
"/sys/class/kgsl/kgsl-3d0/gpu_clock_stats",
"/sys/class/kgsl/kgsl-3d0/gpu_llc_slice_enable",
"/sys/class/kgsl/kgsl-3d0/gpu_model",
"/sys/class/kgsl/kgsl-3d0/gpubusy",
"/sys/class/kgsl/kgsl-3d0/gpuclk",
"/sys/class/kgsl/kgsl-3d0/gpuhtw_llc_slice_enable",
"/sys/class/kgsl/kgsl-3d0/hwcg",
"/sys/class/kgsl/kgsl-3d0/idle_timer",
"/sys/class/kgsl/kgsl-3d0/lm",
"/sys/class/kgsl/kgsl-3d0/max_gpuclk",
"/sys/class/kgsl/kgsl-3d0/max_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/min_clock_mhz",
"/sys/class/kgsl/kgsl-3d0/min_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/num_pwrlevels",
"/sys/class/kgsl/kgsl-3d0/pmqos_active_latency",
"/sys/class/kgsl/kgsl-3d0/popp",
"/sys/class/kgsl/kgsl-3d0/preempt_count",
"/sys/class/kgsl/kgsl-3d0/preempt_level",
"/sys/class/kgsl/kgsl-3d0/preemption",
"/sys/class/kgsl/kgsl-3d0/pwrscale",
"/sys/class/kgsl/kgsl-3d0/reset_count",
"/sys/class/kgsl/kgsl-3d0/skipsaverestore",
"/sys/class/kgsl/kgsl-3d0/sptp_pc",
"/sys/class/kgsl/kgsl-3d0/thermal_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/throttling",
"/sys/class/kgsl/kgsl-3d0/usesgmem",
"/sys/class/kgsl/kgsl-3d0/wake_nice",
"/sys/class/kgsl/kgsl-3d0/wake_timeout",
"/sys/devices/soc0/accessory_chip",
"/sys/devices/soc0/build_id",
"/sys/devices/soc0/chip_family",
"/sys/devices/soc0/chip_name",
"/sys/devices/soc0/family",
"/sys/devices/soc0/foundry_id",
"/sys/devices/soc0/hw_platform",
"/sys/devices/soc0/image_crm_version",
"/sys/devices/soc0/image_variant",
"/sys/devices/soc0/image_version",
"/sys/devices/soc0/images",
"/sys/devices/soc0/machine",
"/sys/devices/soc0/ncluster_array_offset",
"/sys/devices/soc0/ndefective_parts_array_offset",
"/sys/devices/soc0/nmodem_supported",
"/sys/devices/soc0/nproduct_id",
"/sys/devices/soc0/num_clusters",
"/sys/devices/soc0/num_defective_parts",
"/sys/devices/soc0/platform_subtype",
"/sys/devices/soc0/platform_subtype_id",
"/sys/devices/soc0/platform_version",
"/sys/devices/soc0/pmic_die_revision",
"/sys/devices/soc0/pmic_model",
"/sys/devices/soc0/raw_device_family",
"/sys/devices/soc0/raw_device_number",
"/sys/devices/soc0/raw_id",
"/sys/devices/soc0/raw_version",
"/sys/devices/soc0/revision",
"/sys/devices/soc0/select_image",
"/sys/devices/soc0/serial_number",
"/sys/devices/soc0/soc_id",
"/sys/devices/soc0/vendor",
"/sys/devices/system/cpu/kernel_max",
"/sys/devices/system/cpu/possible",
"/sys/devices/system/cpu/present",
"/sys/devices/system/cpu/online",
"/sys/devices/system/cpu/offline",
"/sys/devices/system/cpu/modalias",
"/sys/devices/system/cpu/cpufreq/all_time_in_state",
"/sys/devices/system/cpu/cpufreq/current_in_state",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_cpu_num",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_max_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_min_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/hmp_boost_type",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/hmp_prev_boost_type",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_cpu_num",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_divider",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_max_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_min_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_min_lock",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/requests",
"/sys/devices/system/cpu/cpuidle/current_driver",
"/sys/devices/system/cpu/cpuidle/current_governor_ro",
"/sys/devices/system/cpu/cputopo/cpus_per_cluster",
"/sys/devices/system/cpu/cputopo/big_cpumask",
"/sys/devices/system/cpu/cputopo/glbinfo",
"/sys/devices/system/cpu/cputopo/is_big_little",
"/sys/devices/system/cpu/cputopo/is_multi_cluster",
"/sys/devices/system/cpu/cputopo/little_cpumask",
"/sys/devices/system/cpu/cputopo/nr_clusters",
"/sys/devices/system/b.L/big_threads",
"/sys/devices/system/b.L/boot_cluster",
"/sys/devices/system/b.L/core_status",
"/sys/devices/system/b.L/little_threads",
"/sys/devices/system/b.L/down_migrations",
"/sys/devices/system/b.L/up_migrations",
"/sys/devices/system/cpu/clusterhotplug/cur_hstate",
"/sys/devices/system/cpu/clusterhotplug/down_freq",
"/sys/devices/system/cpu/clusterhotplug/down_tasks",
"/sys/devices/system/cpu/clusterhotplug/down_threshold",
"/sys/devices/system/cpu/clusterhotplug/sampling_rate",
"/sys/devices/system/cpu/clusterhotplug/time_in_state",
"/sys/devices/system/cpu/clusterhotplug/up_freq",
"/sys/devices/system/cpu/clusterhotplug/up_tasks",
"/sys/devices/system/cpu/clusterhotplug/up_threshold",
]
CPU_FILES = [
"core_ctl/active_cpus",
"core_ctl/busy_up_thres",
"core_ctl/busy_down_thres",
"core_ctl/enable",
"core_ctl/global_state",
"core_ctl/is_big_cluster",
"core_ctl/max_cpus",
"core_ctl/min_cpus",
"core_ctl/need_cpus",
"core_ctl/not_preferred",
"core_ctl/offline_delay_ms",
"core_ctl/task_thres",
"current_driver",
"current_governor_ro",
"cpuidle/driver/name",
"cpufreq/affected_cpus",
"cpufreq/cpuinfo_max_freq",
"cpufreq/cpuinfo_min_freq",
"cpufreq/cpuinfo_transition_latency",
"cpufreq/related_cpus",
"cpufreq/scaling_available_frequencies",
"cpufreq/scaling_available_governors",
"cpufreq/scaling_cur_freq",
"cpufreq/scaling_driver",
"cpufreq/scaling_governor",
"cpufreq/scaling_max_freq",
"cpufreq/scaling_min_freq",
"cpufreq/sched/down_throttle_nsec",
"cpufreq/sched/up_throttle_nsec",
"cpufreq/stats/time_in_state",
"cpufreq/stats/total_trans",
"cpufreq/stats/trans_table",
"isolate",
"regs/identification/midr_el1",
"regs/identification/revidr_el1",
"sched_load_boost",
"topology/core_id",
"topology/core_siblings",
"topology/core_siblings_list",
"topology/cpu_capacity",
"topology/max_cpu_capacity",
"topology/physical_package_id",
"topology/thread_siblings",
"topology/thread_siblings_list",
]
CACHE_FILES = [
"allocation_policy",
"coherency_line_size",
"level",
"number_of_sets",
"shared_cpu_list",
"shared_cpu_map",
"size",
"type",
"ways_of_associativity",
"write_policy",
]
def c_escape(string):
c_string = ""
for c in string:
if c == "\\":
c_string += "\\\\"
elif c == "\"":
c_string += "\\\""
elif c == "\t":
c_string += "\\t"
elif c == "\n":
c_string += "\\n"
elif c == "\r":
c_string += "\\r"
elif ord(c) == 0:
c_string += "\\0"
elif 32 <= ord(c) < 127:
c_string += c
else:
c_string += "x%02X" % ord(c)
return c_string
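# Editor's illustration (hypothetical calls, not part of the original
# script): c_escape() makes raw device-file bytes safe inside a C string
# literal, e.g.
#     c_escape('a\tb')     returns 'a\\tb'       (tab becomes \t)
#     c_escape('say "hi"') returns 'say \\"hi\\"'
#     c_escape('\x7f')     returns '\\x7F'       (other bytes become \xNN)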
def adb_shell(commands):
env = os.environ.copy()
env["LC_ALL"] = "C"
adb = subprocess.Popen(["adb", "shell"] + commands, env=env, stdout=subprocess.PIPE)
stdout, _ = adb.communicate()
if adb.returncode == 0:
return stdout
def adb_pull(device_path, local_path):
if any(device_path.startswith(prefix) for prefix in SHELL_PREFIX):
content = adb_shell(["cat", device_path])
if content is not None:
if not content.rstrip().endswith("No such file or directory"):
with open(local_path, "wb") as local_file:
local_file.write(content)
return True
else:
env = os.environ.copy()
env["LC_ALL"] = "C"
adb = subprocess.Popen(["adb", "pull", device_path, local_path], env=env)
adb.communicate()
return adb.returncode == 0
def adb_getprop():
properties = adb_shell(["getprop"])
properties_list = list()
while properties:
assert properties.startswith("[")
properties = properties[1:]
key, properties = properties.split("]", 1)
properties = properties.strip()
assert properties.startswith(":")
properties = properties[1:].strip()
assert properties.startswith("[")
properties = properties[1:]
value, properties = properties.split("]", 1)
properties = properties.strip()
properties_list.append((key, value))
return properties_list
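# Editor's illustration (hypothetical output, not part of the original
# script): `adb shell getprop` prints lines shaped like
#     [ro.product.model]: [SM-G930F]
# which adb_getprop() above parses into an ordered list of pairs, e.g.
#     [('ro.product.model', 'SM-G930F'), ...]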
def dump_device_file(stream, path):
temp_fd, temp_path = tempfile.mkstemp()
os.close(temp_fd)
try:
if adb_pull(path, temp_path):
with open(temp_path, "rb") as temp_file:
content = temp_file.read()
stream.write("\t{\n")
stream.write("\t\t.path = \"%s\",\n" % path)
stream.write("\t\t.size = %d,\n" % len(content))
if len(content.splitlines()) > 1:
stream.write("\t\t.content =")
for line in content.splitlines(True):
stream.write("\n\t\t\t\"%s\"" % c_escape(line))
stream.write(",\n")
else:
stream.write("\t\t.content = \"%s\",\n" % c_escape(content))
stream.write("\t},\n")
return content
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
def main(args):
options = parser.parse_args(args)
dmesg_content = adb_shell(["dmesg"])
if dmesg_content is not None and dmesg_content.strip() == "klogctl: Operation not permitted":
dmesg_content = None
if dmesg_content is not None:
with open(os.path.join("test", "dmesg", options.prefix + ".log"), "w") as dmesg_dump:
dmesg_dump.write(dmesg_content)
build_prop_content = None
proc_cpuinfo_content = None
kernel_max = 0
with open(os.path.join("test", "mock", options.prefix + ".h"), "w") as file_header:
file_header.write("struct cpuinfo_mock_file filesystem[] = {\n")
for path in SYSTEM_FILES:
content = dump_device_file(file_header, path)
if content is not None:
if path == "/proc/cpuinfo":
proc_cpuinfo_content = content
elif path == "/system/build.prop":
build_prop_content = content
elif path == "/sys/devices/system/cpu/kernel_max":
kernel_max = int(content.strip())
for cpu in range(kernel_max + 1):
for filename in CPU_FILES:
path = "/sys/devices/system/cpu/cpu%d/%s" % (cpu, filename)
dump_device_file(file_header, path)
for index in range(5):
for filename in CACHE_FILES:
path = "/sys/devices/system/cpu/cpu%d/cache/index%d/%s" % (cpu, index, filename)
dump_device_file(file_header, path)
file_header.write("\t{ NULL },\n")
file_header.write("};\n")
file_header.write("#ifdef __ANDROID__\n")
file_header.write("struct cpuinfo_mock_property properties[] = {\n")
for key, value in adb_getprop():
file_header.write("\t{\n")
file_header.write("\t\t.key = \"%s\",\n" % c_escape(key))
file_header.write("\t\t.value = \"%s\",\n" % c_escape(value))
file_header.write("\t},\n")
file_header.write("\t{ NULL },\n")
file_header.write("};\n")
file_header.write("#endif /* __ANDROID__ */\n")
if proc_cpuinfo_content is not None:
with open(os.path.join("test", "cpuinfo", options.prefix + ".log"), "w") as proc_cpuinfo_dump:
proc_cpuinfo_dump.write(proc_cpuinfo_content)
if build_prop_content is not None:
with open(os.path.join("test", "build.prop", options.prefix + ".log"), "w") as build_prop_dump:
build_prop_dump.write(build_prop_content)
if __name__ == "__main__":
main(sys.argv[1:])
|
Teaonly/easyLearning.js
|
TensorExpress/aten/src/ATen/cpu/cpuinfo/scripts/android-filesystem-dump.py
|
Python
|
mit
| 13,760
|
[
"Galaxy"
] |
d145a0a2abe0f07f99defca9ceaf03bc521a73a494d5f408dcf332be1e6fead6
|
"""
Test backend attachment
"""
__RCSID__ = "$Id$"
import pytest
from DIRAC.FrameworkSystem.private.standardLogging.test.TestLogUtilities import gLogger, gLoggerReset, cleaningLog
def getContentFromFilename(backendOptions):
"""Get content from the file attached to a given backend and erase the content from the file."""
filename = backendOptions.get("FileName")
if not filename:
return None
# get the content of the file
with open(filename, "r") as fileContent:
content = fileContent.read()
# clean the content
lines = content.split("\n")
cleanContent = ""
for line in lines:
cleanContent += cleaningLog(line)
# reset the file
with open(filename, "w") as fileContent:
pass
return cleanContent
@pytest.mark.parametrize(
"backends",
[
(
{
"file1": {
"logger": "gLogger",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp"},
"extractBackendContent": getContentFromFilename,
"backendContent": "Framework NOTICE: msgFramework/log NOTICE: msgFramework/log/sublog NOTICE: msg",
}
}
),
(
{
"file2": {
"logger": "log",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp"},
"extractBackendContent": getContentFromFilename,
"backendContent": "Framework/log NOTICE: msgFramework/log/sublog NOTICE: msg",
}
}
),
(
{
"file3": {
"logger": "sublog",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp"},
"extractBackendContent": getContentFromFilename,
"backendContent": "Framework/log/sublog NOTICE: msg",
}
}
),
(
{
"file4": {
"logger": "gLogger",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp", "LogLevel": "error"},
"extractBackendContent": getContentFromFilename,
"backendContent": "",
}
}
),
(
{
"file5": {
"logger": "log",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp", "LogLevel": "error"},
"extractBackendContent": getContentFromFilename,
"backendContent": "",
}
}
),
(
{
"file6": {
"logger": "sublog",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp", "LogLevel": "error"},
"extractBackendContent": getContentFromFilename,
"backendContent": "",
}
}
),
(
{
"file7a": {
"logger": "gLogger",
"backendType": "file",
"backendOptions": {"FileName": "backend_test1.tmp"},
"extractBackendContent": getContentFromFilename,
"backendContent": "Framework NOTICE: msgFramework/log NOTICE: msgFramework/log/sublog NOTICE: msg",
},
"file7b": {
"logger": "gLogger",
"backendType": "file",
"backendOptions": {"FileName": "backend_test2.tmp"},
"extractBackendContent": getContentFromFilename,
"backendContent": "Framework NOTICE: msgFramework/log NOTICE: msgFramework/log/sublog NOTICE: msg",
},
}
),
],
)
def test_registerBackendgLogger(backends):
"""
Attach backends to gLogger, generate some logs from different loggers and check the content of the backends
"""
_, log, sublog = gLoggerReset()
# dictionary of available loggers
loggers = {"gLogger": gLogger, "log": log, "sublog": sublog}
# attach backends to the corresponding logger
for backend, params in backends.items():
logger = loggers[params["logger"]]
numberOfBackends = len(logger._backendsList)
logger.registerBackend(params["backendType"], params["backendOptions"])
# backend should be added to logger.backendList
assert len(logger._backendsList) == (numberOfBackends + 1)
# Generate logs from gLogger, log, sublog
gLogger.setLevel("notice")
gLogger.notice("msg")
log.notice("msg")
sublog.notice("msg")
# Check the content of the backends
for backend, params in backends.items():
content = params["extractBackendContent"](params["backendOptions"])
assert content == params["backendContent"]
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/private/standardLogging/test/Test_Logging_Backends.py
|
Python
|
gpl-3.0
| 5,140
|
[
"DIRAC"
] |
fc97e939af37e726f19d60a195c1494d92ed99793372f8adf234979abf59d0c6
|
import logging
from sms import SMSSender
class BaseBot(object):
"""Base class for bot classes"""
def __init__(self, browser, config, planets):
# Authenticate and get browser instance
self.browser = browser
self.config = config
# Get logger
self.logger = logging.getLogger('OGBot')
self.planets = planets
# Set Default origin planet
self.default_origin_planet = self.get_default_origin_planet(self.config.default_origin_planet_name)
# self.planet will be None if the user doesn't specify a valid planet name
self.planet = self.get_player_planet_by_name(config.planet_name)
self.sms_sender = SMSSender(config)
# Util functions
def get_player_planet_by_name(self, planet_name):
"""Get player planet by name. If there is no match returns None"""
planets = self.planets
if planet_name is None:
return None
planet = next(iter([planet for planet
in planets
if planet.name.lower() == planet_name.lower()]), None)
return planet
def get_default_origin_planet(self, planet_name):
if planet_name is None:
return self.planets[0]
else:
return self.get_player_planet_by_name(planet_name)
@staticmethod
def get_nearest_planet_to_coordinates(coordinates, planets):
"""Get the nearest planet to the target coordinates"""
# Get the closest galaxy
target_galaxy = int(coordinates.split(':')[0])
planet_galaxies = set([int(planet.coordinates.split(':')[0]) for planet in planets])
closest_galaxy = min(planet_galaxies, key=lambda x: abs(x - target_galaxy))
# Get the closest system
target_system = int(coordinates.split(':')[1])
planet_systems = [int(planet.coordinates.split(':')[1])
for planet in planets
if planet.coordinates.split(':')[0] == str(closest_galaxy)]
closest_system = min(planet_systems, key=lambda x: abs(x - target_system))
planet = next((planet
for planet
in planets
if planet.coordinates.split(":")[0] == str(closest_galaxy)
and planet.coordinates.split(":")[1] == str(closest_system)
), None)
if planet is None:
raise EnvironmentError("Error getting closest planet from target")
else:
return planet
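# Editor's note: a minimal usage sketch (stand-in Planet type and values
# assumed; not part of OG-Bot) for the nearest-planet lookup above.
if __name__ == '__main__':
    from collections import namedtuple
    Planet = namedtuple('Planet', 'name coordinates')
    planets = [Planet('Alpha', '1:20:4'), Planet('Beta', '2:300:7')]
    nearest = BaseBot.get_nearest_planet_to_coordinates('2:290:5', planets)
    print(nearest.name)  # 'Beta' -- same galaxy, closest system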
|
winiciuscota/OG-Bot
|
ogbot/core/base.py
|
Python
|
mit
| 2,566
|
[
"Galaxy"
] |
e99cfc39fde5b6124f3b9962fd5bc977062599f031ca0df7f6650b1f0725ec90
|
import numpy as np
from .control import model_setup
from .cp_confocal import threed
from .cp_triplet import trip
def CF_Gxyz_blink(parms, tau):
u""" Three-dimanesional free diffusion with a Gaussian laser profile
(eliptical), including a triplet component.
The triplet factor takes into account a blinking term.
Set *T* or *τ_trip* to 0, if no triplet component is wanted.
G(τ) = offset + 1/( n*(1+τ/τ_diff) * sqrt(1 + τ/(SP²*τ_diff)) )
* ( 1+T/(1.-T)*exp(-τ/τ_trip) )
Calculation of diffusion coefficient and concentration
from the effective radius of the detection profile (r₀ = 2*σ):
D = r₀²/(4*τ_diff)
Conc = n/( sqrt(π³)*r₀²*z₀ )
*parms* - a list of parameters.
Parameters (parms[i]):
[0] n Effective number of particles in confocal volume
[1] T Fraction of particles in triplet (non-fluorescent) state
0 <= T < 1
[2] τ_trip Characteristic residence time in triplet state
[3] τ_diff Characteristic residence time in confocal volume
[4] SP SP=z₀/r₀ Structural parameter,
describes the axis ratio of the confocal volume
[5] offset
*tau* - lag time
"""
n = parms[0]
T = parms[1]
tautrip = parms[2]
taudiff = parms[3]
SP = parms[4]
off = parms[5]
AA = trip(tau, tautrip, T)
BB = threed(tau, taudiff, SP)
G = off + 1/n * AA * BB
return G
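# Editor's note: a hypothetical evaluation sketch (values assumed, not part
# of PyCorrFit). With the default parameters defined below, the model can be
# evaluated on an array of lag times:
#     tau = np.logspace(-6, 1, 10)  # lag times [ms]
#     g = CF_Gxyz_blink([4.0, 0.2, 0.001, 0.4, 5.0, 0.0], tau)
# For tau -> 0 the curve approaches offset + 1/(n*(1 - T)).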
def supplements(parms, countrate=None):
# We can only give you the effective particle number
n = parms[0]
Info = list()
if countrate is not None:
# CPP
cpp = countrate/n
Info.append(["cpp [kHz]", cpp])
return Info
parms = [4.0, 0.2, 0.001, 0.4, 5.0, 0.0]
# Boundaries
boundaries = [[0, np.inf]]*len(parms)
# T
boundaries[1] = [0, .9999999999999]
boundaries[-1] = [-np.inf, np.inf]
model_setup(
modelid=6011,
name="3D diffusion with triplet (confocal)",
comp="T+3D",
mtype="Confocal (Gaussian) and triplet",
fctn=CF_Gxyz_blink,
par_labels=[
u"n",
u"T",
u"τ_trip [ms]",
u"τ_diff [ms]",
u"SP",
u"offset"],
par_values=parms,
par_vary=[True, True, True, True, False, False],
par_boundaries=boundaries,
par_constraints=[[3, ">", 2]],
par_hr_labels=[
u"n",
u"T",
u"τ_trip [µs]",
u"τ_diff [ms]",
u"SP",
u"offset"],
par_hr_factors=[1., 1., 1000., 1., 1., 1.],
supplementary_method=supplements
)
|
paulmueller/PyCorrFit
|
pycorrfit/models/model_confocal_t_3d.py
|
Python
|
gpl-2.0
| 2,628
|
[
"Gaussian"
] |
c3b2833d6443a448f1c7a6da09190a4837587504d791c3a2845a768bd0c28286
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# IPython version information. An empty _version_extra corresponds to a full
# release. 'dev' as a _version_extra string means this is a development
# version
_version_major = 5
_version_minor = 1
_version_patch = 0
_version_extra = '.dev'
# _version_extra = 'rc1'
_version_extra = '' # Uncomment this for full releases
# release.codename is deprecated in 2.0, will be removed in 3.0
codename = ''
# Construct full version string from these.
_ver = [_version_major, _version_minor, _version_patch]
__version__ = '.'.join(map(str, _ver))
if _version_extra:
__version__ = __version__ + _version_extra
version = __version__ # backwards compatibility name
version_info = (_version_major, _version_minor, _version_patch, _version_extra)
# Change this when incrementing the kernel protocol version
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
description = "IPython: Productive Interactive Computing"
long_description = \
"""
IPython provides a rich toolkit to help you make the most out of using Python
interactively. Its main components are:
* A powerful interactive Python shell
* A `Jupyter <http://jupyter.org/>`_ kernel to work with Python code in Jupyter
notebooks and other interactive frontends.
The enhanced interactive Python shells have the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Extensible tab completion, with support by default for completion of python
variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* A rich configuration system with easy switching between different setups
(simpler than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available from IPython's `GitHub
site <http://github.com/ipython>`_.
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com'),
'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
}
author = 'The IPython Development Team'
author_email = 'ipython-dev@scipy.org'
url = 'http://ipython.org'
platforms = ['Linux','Mac OSX','Windows']
keywords = ['Interactive','Interpreter','Shell', 'Embedding']
classifiers = [
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: System :: Shells'
]
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/IPython/core/release.py
|
Python
|
mit
| 4,386
|
[
"Brian"
] |
ab985e17f5b104735f89f394fe67a512e55dce7ee226a6cbd4b6f99aa24e6526
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette <stephanecharette@gmail.com>
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2014 Paul Franklin
# Copyright (C) 2015 Detlef Wolz <detlef.wolz@t-online.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Generate an hourglass graph using the Graphviz generator.
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import (PersonOption, BooleanOption, NumberOption,
EnumeratedListOption, ColorOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.proxy import CacheProxyDb
#------------------------------------------------------------------------
#
# Constant options items
#
#------------------------------------------------------------------------
_COLORS = [{'name' : _("B&W outline"), 'value' : "outline"},
{'name' : _("Colored outline"), 'value' : "colored"},
{'name' : _("Color fill"), 'value' : "filled"}]
_ARROWS = [ { 'name' : _("Center -> Others"), 'value' : 'o' },
{ 'name' : _("Center <- Others"), 'value' : 'c' },
{ 'name' : _("Center <-> Other"), 'value' : 'co' },
{ 'name' : _("Center - Other"), 'value' : '' }]
#------------------------------------------------------------------------
#
# HourGlassReport
#
#------------------------------------------------------------------------
class HourGlassReport(Report):
"""
An hourglass report displays ancestors and descendants of a center person.
"""
def __init__(self, database, options, user):
"""
Create HourGlass object that produces the report.
name_format - Preferred format to display names
incl_private - Whether to include private data
inc_id - Whether to include IDs.
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, menu)
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, self._locale)
self.database = CacheProxyDb(self.database)
self.__db = self.database
self.__used_people = []
self.__family_father = [] # links allocated from family to father
self.__family_mother = [] # links allocated from family to mother
self.max_descend = menu.get_option_by_name('maxdescend').get_value()
self.max_ascend = menu.get_option_by_name('maxascend').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.center_person = self.__db.get_person_from_gramps_id(pid)
if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
self.colorize = menu.get_option_by_name('color').get_value()
self.colors = {'male': menu.get_option_by_name('colormales').get_value(),
'female': menu.get_option_by_name('colorfemales').get_value(),
'unknown': menu.get_option_by_name('colorunknown').get_value(),
'family': menu.get_option_by_name('colorfamilies').get_value()
}
self.roundcorners = menu.get_option_by_name('roundcorners').get_value()
self.includeid = menu.get_option_by_name('inc_id').get_value()
arrow_str = menu.get_option_by_name('arrow').get_value()
if 'o' in arrow_str:
self.arrowheadstyle = 'normal'
else:
self.arrowheadstyle = 'none'
if 'c' in arrow_str:
self.arrowtailstyle = 'normal'
else:
self.arrowtailstyle = 'none'
stdoptions.run_name_format_option(self, menu)
def write_report(self):
"""
Generate the report.
"""
self.add_person(self.center_person)
self.traverse_up(self.center_person, 1)
self.traverse_down(self.center_person, 1)
def traverse_down(self, person, gen):
"""
Recursively find the descendants of the given person.
"""
if gen > self.max_descend:
return
for family_handle in person.get_family_handle_list():
family = self.__db.get_family_from_handle(family_handle)
self.add_family(family)
self.doc.add_link(person.get_gramps_id(), family.get_gramps_id(),
head=self.arrowheadstyle,
tail=self.arrowtailstyle)
for child_ref in family.get_child_ref_list():
child_handle = child_ref.get_reference_handle()
if child_handle not in self.__used_people:
# Avoid going down paths twice when descendant cousins marry
self.__used_people.append(child_handle)
child = self.__db.get_person_from_handle(child_handle)
self.add_person(child)
self.doc.add_link(family.get_gramps_id(),
child.get_gramps_id(),
head=self.arrowheadstyle,
tail=self.arrowtailstyle)
self.traverse_down(child, gen+1)
def traverse_up(self, person, gen):
"""
Recursively find the ancestors of the given person.
"""
if gen > self.max_ascend:
return
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.__db.get_family_from_handle(family_handle)
family_id = family.get_gramps_id()
self.add_family(family)
self.doc.add_link(family_id, person.get_gramps_id(),
head=self.arrowtailstyle,
tail=self.arrowheadstyle )
# create link from family to father
father_handle = family.get_father_handle()
if father_handle and family_handle not in self.__family_father:
# allocate only one father per family
self.__family_father.append(family_handle)
father = self.__db.get_person_from_handle(father_handle)
self.add_person(father)
self.doc.add_link(father.get_gramps_id(), family_id,
head=self.arrowtailstyle,
tail=self.arrowheadstyle )
# no need to go up if he is a father in another family
if father_handle not in self.__used_people:
self.__used_people.append(father_handle)
self.traverse_up(father, gen+1)
# create link from family to mother
mother_handle = family.get_mother_handle()
if mother_handle and family_handle not in self.__family_mother:
# allocate only one mother per family
self.__family_mother.append(family_handle)
mother = self.__db.get_person_from_handle(mother_handle)
self.add_person(mother)
self.doc.add_link(mother.get_gramps_id(), family_id,
head=self.arrowtailstyle,
tail=self.arrowheadstyle)
# no need to go up if she is a mother in another family
if mother_handle not in self.__used_people:
self.__used_people.append(mother_handle)
self.traverse_up(mother, gen+1)
def add_person(self, person):
"""
Add a person to the Graph. The node id will be the person's gramps id.
"""
p_id = person.get_gramps_id()
name = self._name_display.display(person)
name = name.replace('"', '&#34;')
name = name.replace('<', '&#60;').replace('>', '&#62;')
birth_evt = get_birth_or_fallback(self.__db, person)
if birth_evt:
birth = self._get_date(birth_evt.get_date_object())
else:
birth = ""
death_evt = get_death_or_fallback(self.__db, person)
if death_evt:
death = self._get_date(death_evt.get_date_object())
else:
death = ""
if self.includeid == 0: # no ID
label = "%s \\n(%s - %s)" % (name, birth, death)
elif self.includeid == 1: # same line
label = "%s (%s)\\n(%s - %s)" % (name, p_id, birth, death)
elif self.includeid == 2: # own line
label = "%s \\n(%s - %s)\\n(%s)" % (name, birth, death, p_id)
label = label.replace('"', '\\\"')
(shape, style, color, fill) = self.get_gender_style(person)
self.doc.add_node(p_id, label, shape, color, style, fill)
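# Editor's sketch (hypothetical values, not part of Gramps): the three
# 'inc_id' label layouts built above, for a person I0001 born 1900, died 1980:
#     0 (no ID):     'John Doe \n(1900 - 1980)'
#     1 (same line): 'John Doe (I0001)\n(1900 - 1980)'
#     2 (own line):  'John Doe \n(1900 - 1980)\n(I0001)'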
def add_family(self, family):
"""
Add a family to the Graph. The node id will be the family's gramps id.
"""
family_id = family.get_gramps_id()
label = ""
marriage = utils.find_marriage(self.__db, family)
if marriage:
label = self._get_date(marriage.get_date_object())
if self.includeid == 1 and label: # same line
label = "%s (%s)" % (label, family_id)
elif self.includeid == 1 and not label:
label = "(%s)" % family_id
elif self.includeid == 2 and label: # own line
label = "%s\\n(%s)" % (label, family_id)
elif self.includeid == 2 and not label:
label = "(%s)" % family_id
color = ""
fill = ""
style = "solid"
if self.colorize == 'colored':
color = self.colors['family']
elif self.colorize == 'filled':
fill = self.colors['family']
style = "filled"
self.doc.add_node(family_id, label, "ellipse", color, style, fill)
def get_gender_style(self, person):
"return gender specific person style"
gender = person.get_gender()
shape = "box"
style = "solid"
color = ""
fill = ""
if gender == person.FEMALE and self.roundcorners:
style = "rounded"
elif gender == person.UNKNOWN:
shape = "hexagon"
if self.colorize == 'colored':
if gender == person.MALE:
color = self.colors['male']
elif gender == person.FEMALE:
color = self.colors['female']
else:
color = self.colors['unknown']
elif self.colorize == 'filled':
style += ",filled"
if gender == person.MALE:
fill = self.colors['male']
elif gender == person.FEMALE:
fill = self.colors['female']
else:
fill = self.colors['unknown']
return(shape, style, color, fill)
#------------------------------------------------------------------------
#
# HourGlassOptions
#
#------------------------------------------------------------------------
class HourGlassOptions(MenuReportOptions):
"""
Defines options for the HourGlass report.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Create all the menu options for this report.
"""
category_name = _("Report Options")
pid = PersonOption(_("Center Person"))
pid.set_help(_("The Center person for the graph"))
menu.add_option(category_name, "pid", pid)
max_gen_d = NumberOption(_('Max Descendant Generations'), 10, 1, 15)
max_gen_d.set_help(_("The number of generations of descendants to "
"include in the graph"))
menu.add_option(category_name, "maxdescend", max_gen_d)
max_gen_a = NumberOption(_('Max Ancestor Generations'), 10, 1, 15)
max_gen_a.set_help(_("The number of generations of ancestors to "
"include in the graph"))
menu.add_option(category_name, "maxascend", max_gen_a)
arrow = EnumeratedListOption(_("Arrowhead direction"), 'o')
for i in range( 0, len(_ARROWS) ):
arrow.add_item(_ARROWS[i]["value"], _ARROWS[i]["name"])
arrow.set_help(_("Choose the direction that the arrows point."))
menu.add_option(category_name, "arrow", arrow)
color = EnumeratedListOption(_("Graph coloring"), "filled")
        for i in range(len(_COLORS)):
color.add_item(_COLORS[i]["value"], _COLORS[i]["name"])
color.set_help(_("Males will be shown with blue, females "
"with red. If the sex of an individual "
"is unknown it will be shown with gray."))
menu.add_option(category_name, "color", color)
roundedcorners = BooleanOption(_("Use rounded corners"), False) # 2180
roundedcorners.set_help(
_("Use rounded corners to differentiate between women and men."))
menu.add_option(category_name, "roundcorners", roundedcorners)
stdoptions.add_gramps_id_option(menu, category_name, ownline=True)
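        # The Gramps-ID option above presumably drives the includeid values
        # consumed by add_person/add_family (0 = no ID, 1 = same line,
        # 2 = own line); this mapping is assumed, not shown here.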
category_name = _("Report Options (2)")
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
################################
category_name = _("Graph Style")
################################
color_males = ColorOption(_('Males'), '#e0e0ff')
color_males.set_help(_('The color to use to display men.'))
menu.add_option(category_name, 'colormales', color_males)
color_females = ColorOption(_('Females'), '#ffe0e0')
color_females.set_help(_('The color to use to display women.'))
menu.add_option(category_name, 'colorfemales', color_females)
color_unknown = ColorOption(_('Unknown'), '#e0e0e0')
color_unknown.set_help(_('The color to use '
'when the gender is unknown.'))
menu.add_option(category_name, 'colorunknown', color_unknown)
color_family = ColorOption(_('Families'), '#ffffe0')
color_family.set_help(_('The color to use to display families.'))
menu.add_option(category_name, 'colorfamilies', color_family)
| prculley/gramps | gramps/plugins/graph/gvhourglass.py | Python | gpl-2.0 | 16,020 | ["Brian"] | 2c241246ba0848080284a35bb6e8cf966720e8cd9bda734ee9f58aeee0233873 |
#!/usr/bin/python
"""Test of label guess functionality."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"1. Next form field",
["BRAILLE LINE: 'search mozilla: $l'",
" VISIBLE: 'search mozilla: $l', cursor=17",
"SPEECH OUTPUT: 'search mozilla: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"2. Next form field",
["BRAILLE LINE: 'Go push button'",
" VISIBLE: 'Go push button', cursor=1",
"SPEECH OUTPUT: 'Go push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"3. Next form field",
["BRAILLE LINE: 'Your email address: $l'",
" VISIBLE: 'Your email address: $l', cursor=21",
"SPEECH OUTPUT: 'Your email address: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"4. Next form field",
["BRAILLE LINE: 'Your name (optional): $l'",
" VISIBLE: 'Your name (optional): $l', cursor=23",
"SPEECH OUTPUT: 'Your name (optional): entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"5. Next form field",
["BRAILLE LINE: 'Pick a password: $l'",
" VISIBLE: 'Pick a password: $l', cursor=18",
"SPEECH OUTPUT: 'Pick a password: password text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"6. Next form field",
["BRAILLE LINE: 'Reenter password to confirm: $l'",
" VISIBLE: 'Reenter password to confirm: $l', cursor=30",
"SPEECH OUTPUT: 'Reenter password to confirm: password text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"7. Next form field",
["BRAILLE LINE: '&=y No radio button'",
" VISIBLE: '&=y No radio button', cursor=1",
"SPEECH OUTPUT: 'No.'",
"SPEECH OUTPUT: 'selected radio button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"8. Next form field",
["BRAILLE LINE: '& y Yes radio button'",
" VISIBLE: '& y Yes radio button', cursor=1",
"SPEECH OUTPUT: 'Yes.'",
"SPEECH OUTPUT: 'not selected radio button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"9. Next form field",
["BRAILLE LINE: 'Subscribe push button'",
" VISIBLE: 'Subscribe push button', cursor=1",
"SPEECH OUTPUT: 'Subscribe push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"10. Next form field",
["BRAILLE LINE: 'Admin address: $l'",
" VISIBLE: 'Admin address: $l', cursor=16",
"SPEECH OUTPUT: 'Admin address: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"11. Next form field",
["BRAILLE LINE: 'Password: $l'",
" VISIBLE: 'Password: $l', cursor=11",
"SPEECH OUTPUT: 'Password: password text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"12. Next form field",
["BRAILLE LINE: 'Visit Subscriber List push button'",
" VISIBLE: 'Visit Subscriber List push butto', cursor=1",
"SPEECH OUTPUT: 'Visit Subscriber List push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"13. Next form field",
["BRAILLE LINE: 'subscription email address: $l'",
" VISIBLE: 'subscription email address: $l', cursor=29",
"SPEECH OUTPUT: 'subscription email address: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"14. Next form field",
["BRAILLE LINE: 'Unsubscribe or edit options push button'",
" VISIBLE: 'Unsubscribe or edit options push', cursor=1",
"SPEECH OUTPUT: 'Unsubscribe or edit options push button'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| chrys87/orca-beep | test/keystrokes/firefox/label_inference_mailman.py | Python | lgpl-2.1 | 6,487 | ["VisIt"] | a2e2c8142e791677583bbc3b89ccbecd0263e53ddfd8f7d1fdf28e647336f0dd |