text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
'''
Created : Jan 16, 2017
Last major update : June 29, 2017
@author: Alexandre Day
Purpose:
Fast density clustering
'''
import numpy as np
import time
from numpy.random import random
import sys, os
from .density_estimation import KDE
import pickle
from collections import OrderedDict as OD
from sklearn.neighbors import NearestNeighbors
import multiprocessing
class FDC:
    """Fast Density Clustering via kernel density modelling for low-dimensional data (D <~ 8).

    Parameters
    ----------
    nh_size : int or 'auto', optional (default = 'auto')
        Neighborhood size. This is the scale used for identifying the initial modes
        in the density distribution, regardless of the covariance. If a point has
        the maximum density among its nh_size neighbors, it is marked as a
        potential cluster center. 'auto' scales nh_size with the number of samples
        (about 100 for 10000 samples, never below 10).
    eta : float, optional (default = 0.5)
        Noise threshold used to merge clusters. Merging is done by quenching
        directly to the specified noise threshold (as opposed to progressively
        coarse-graining). The noise threshold determines the extended neighborhood
        of cluster centers: points whose relative density difference is less than
        eta and that are density-reachable belong to the extended neighborhood.
    random_state : int, optional (default = 0)
        Seed for the random number generator, so runs are reproducible by default.
        It seeds the cross-validation (set partitions), which in turn affects the
        bandwidth value.
    test_ratio_size : float, optional (default = 0.8)
        Ratio size of the test set used when performing maximum likelihood
        estimation. To obtain smooth density estimates (prevent overfitting), a
        large test_ratio_size (closer to 1.0) is recommended.
    verbose : int, optional (default = 1)
        Set to 0 to suppress printing to screen.
    bandwidth : float, optional (default = None)
        Kernel density bandwidth; estimated automatically when None.
    merge : bool, optional (default = True)
        Optional merging at zero noise threshold; merges overlapping minimal
        clusters.
    atol : float, optional (default = 0.01)
        Kernel density estimate absolute tolerance. Smaller values lead to slower
        execution but better precision.
    rtol : float, optional (default = 0.0001)
        Kernel density estimate relative tolerance. Smaller values lead to slower
        execution but better precision.
    xtol : float, optional (default = 0.01)
        Precision parameter for optimizing the bandwidth using maximum likelihood
        on a test set.
    search_size : int, optional (default = 20)
        Size of each local neighborhood checked when expanding a neighborhood
        search. Drastically slows the coarse-graining if chosen too big!
    n_cluster_init : int, optional (default = None)
        If set, coarse_grain() stops once the cluster count drops to (or near)
        this value.
    kernel : str, optional (default = 'gaussian')
        Type of kernel used for density estimates. Other options are
        {'epanechnikov' | 'linear' | 'tophat'}.
    n_job : int or 'auto', optional (default = 'auto')
        Number of worker processes for the density evaluation; capped at the
        machine's CPU count.
    """

    def __init__(self, nh_size='auto', eta=0.5,
                 random_state=0, test_ratio_size=0.8, verbose=1, bandwidth=None,
                 merge=True,
                 atol=0.01,
                 rtol=0.0001,
                 xtol=0.01,
                 search_size = 20,
                 n_cluster_init = None,
                 kernel = 'gaussian',
                 n_job='auto'
                 ):
        # store user parameters verbatim; nh_size and eta may still hold the
        # string sentinel 'auto' at this point (resolved later in fit())
        self.test_ratio_size = test_ratio_size
        self.random_state = random_state
        self.verbose = verbose
        self.nh_size = nh_size
        self.bandwidth = bandwidth
        self.eta = eta
        self.merge = merge
        self.atol = atol
        self.rtol = rtol
        self.xtol = xtol
        self.cluster_label = None
        self.search_size = search_size
        self.n_cluster_init = n_cluster_init
        self.kernel = kernel
        # fitted state, populated by fit_density()
        self.nbrs= None
        self.nn_dist= None
        self.nn_list= None
        self.density_model = None
        # resolve the worker count, never exceeding the available CPUs
        if n_job == 'auto':
            self.n_job=multiprocessing.cpu_count()
        else:
            if n_job > multiprocessing.cpu_count():
                self.n_job=multiprocessing.cpu_count()
            else:
                self.n_job=n_job
def fit(self, X):
""" Performs density clustering on given data set
Parameters
----------
X : array, (n_sample, n_feature)
Data to cluster.
Returns
----------
self : fdc object
To obtain new cluster labels use self.cluster_label
"""
t = time.time()
self.X = X # shallow copy
self.n_sample = X.shape[0]
if self.n_sample < 10:
assert False, "Too few samples for computing densities !"
if self.nh_size is 'auto':
self.nh_size = max([int(25*np.log10(self.n_sample)), 10])
if self.search_size > self.nh_size:
self.search_size = self.nh_size
if self.verbose == 0:
blockPrint()
self.display_main_parameters()
print("[fdc] Starting clustering with n=%i samples..." % X.shape[0])
start = time.time()
print("[fdc] Fitting kernel model for density estimation ...")
self.fit_density(X)
#print("here")
print("[fdc] Finding centers ...")
self.compute_delta(X, self.rho)
print("[fdc] Found %i potential centers ..." % self.idx_centers_unmerged.shape[0])
# temporary idx for the centers :
self.idx_centers = self.idx_centers_unmerged
self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph)
if self.merge: # usually by default one should perform this minimal merging ..
print("[fdc] Merging overlapping minimal clusters ...")
self.check_cluster_stability_fast(X, 0.) # given
if self.eta >= 1e-3 :
print("[fdc] Iterating up to specified noise threshold ...")
self.check_cluster_stability_fast(X, self.eta) # merging 'unstable' clusters
print("[fdc] Done in %.3f s" % (time.time()-start))
enablePrint()
return self
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
fname = self.make_file_name()
else:
fname = name
fopen = open(fname,'wb')
pickle.dump(self,fopen)
fopen.close()
return fname
def load(self, name=None):
if name is None:
name = self.make_file_name()
self.__dict__.update(pickle.load(open(name,'rb')).__dict__)
return self
    def fit_density(self, X):
        """Fit the k-NN graph and the kernel density model, then evaluate the
        density rho at every sample (in parallel for large data sets).
        """
        # nearest neighbors class
        self.nbrs = NearestNeighbors(n_neighbors = self.nh_size, algorithm='kd_tree').fit(X)
        # get k-NN: distances and indices of the nh_size nearest neighbors of each point
        self.nn_dist, self.nn_list = self.nbrs.kneighbors(X)
        # density model class
        self.density_model = KDE(bandwidth=self.bandwidth, test_ratio_size=self.test_ratio_size,
            atol=self.atol, rtol=self.rtol, xtol=self.xtol, nn_dist = self.nn_dist, kernel=self.kernel)
        # fit density model to data
        self.density_model.fit(X)
        # save bandwidth (estimated by the KDE when it was passed as None)
        self.bandwidth = self.density_model.bandwidth
        # compute density map based on kernel density model
        # NOTE: '&' is the bitwise operator; it works here because both
        # operands are booleans, but 'and' would be the conventional spelling
        if (self.n_sample > 30000) & (self.n_job !=1) :
            print("[fdc] Computing density with %i threads..."%self.n_job)
            p = multiprocessing.Pool(self.n_job)
            size_split = X.shape[0]//self.n_job
            results =[]
            idx_split = chunkIt(len(X), self.n_job) # find the index to split the array in approx. n_job equal parts.
            for i in range(self.n_job):
                # each worker evaluates one contiguous slice and tags it with
                # its position i so results can be reassembled in order
                results.append(p.apply_async(self.f_tmp, [X[idx_split[i][0]:idx_split[i][1]], i]))
            results = [res.get() for res in results]
            asort = np.argsort([results[i][0] for i in range(self.n_job)]) # reordering
            self.rho=np.hstack([results[a][1] for a in asort])
        else:
            print("[fdc] Computing density with 1 thread...")
            self.rho = self.density_model.evaluate_density(X)
        return self
def f_tmp(self, X_, i_):
"""evaluating density and keeping track of threading order"""
return (i_, self.density_model.evaluate_density(X_))
    #@profile
    def coarse_grain(self, noise_iterable):
        """Starting from an initial noise scale, progressively merges clusters.

        Parameters
        -----------
        noise_iterable : iterable of floats
            Noise values at which to perform coarse graining. Usually one should
            start from 0 and go to larger values by small increments.

        Return
        ------
        self
        """
        if self.verbose == 0:
            blockPrint()
        print("[fdc] Coarse graining until desired noise threshold ...")
        noise_range = [n for n in noise_iterable]
        self.max_noise = -1
        n_cluster = 0
        # note to self, if no merger is done, no need to store hierarchy ... just work with noise_range dict ...
        for nt in noise_range:
            # optional early stop once the requested number of clusters is reached
            if self.n_cluster_init is not None:
                if len(self.idx_centers) < self.n_cluster_init:
                    print("[fdc.py] Reached number of specified clusters [= %i] (or close to), n_cluster = %i"%(self.n_cluster_init,len(self.idx_centers)))
                    break
            self.check_cluster_stability_fast(self.X, eta = nt)
            # record the largest noise level at which the cluster count changed
            if len(self.idx_centers) != n_cluster:
                n_cluster = len(self.idx_centers)
                self.max_noise = nt
        self.noise_range = noise_range
        self.noise_threshold = noise_range[-1]
        enablePrint()
        return self
#@profile
def compute_delta(self, X, rho = None):
"""
Purpose:
Computes distance to nearest-neighbor with higher density
Return:
delta,nn_delta,idx_centers,density_graph
:delta: distance to n.n. with higher density (within some neighborhood cutoff)
:nn_delta: index of n.n. with ...
:idx_centers: list of points that have the largest density in their neigborhood cutoff
:density_graph: for every point, list of points are incoming (via the density gradient)
"""
if rho is None:
rho = self.rho
n_sample, n_feature = X.shape
maxdist = np.linalg.norm([np.max(X[:,i])-np.min(X[:,i]) for i in range(n_feature)])
delta = maxdist*np.ones(n_sample, dtype=np.float)
nn_delta = np.ones(n_sample, dtype=np.int)
density_graph = [[] for i in range(n_sample)] # store incoming leaves
### ----------->
nn_list = self.nn_list # restricted over neighborhood (nh_size)
### ----------->
for i in range(n_sample):
idx = index_greater(rho[nn_list[i]])
if idx:
density_graph[nn_list[i,idx]].append(i)
nn_delta[i] = nn_list[i,idx]
delta[i] = self.nn_dist[i,idx]
else:
nn_delta[i]=-1
idx_centers=np.array(range(n_sample))[delta > 0.999*maxdist]
self.delta = delta
self.nn_delta = nn_delta
self.idx_centers_unmerged = idx_centers
self.density_graph = density_graph
return self
def estimate_eta(self):
""" Based on the density distribution, computes a scale for eta
Need more experimenting, this is not quite working ...
"""
from matplotlib import pyplot as plt
idx = int(self.n_sample/10.)
idx = np.argsort(self.rho)[:-5*idx]#[2:idx:4*idx]
drho = []
for i in idx:
rho_init = self.rho[i]
nn_i = self.nn_delta[i]
while nn_i != -1:
rho_c = self.rho[nn_i]
nn_i = self.nn_delta[nn_i]
drho.append(rho_c- rho_init)
""" plt.his(drho,bins=60)
plt.show()
exit() """
eta = np.mean(drho)#+0.5*np.std(drho)
self.cout("Using std eta of %.3f"%eta)
return eta
    def check_cluster_stability_fast(self, X, eta = None): # given
        """Repeatedly reassign labels and merge spurious centers until the
        number of clusters stops changing at noise level eta."""
        if self.verbose == 0:
            blockPrint()
        if eta is None:
            eta = self.eta
        while True: # iterates until the number of clusters does not change ...
            self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph) # first approximation of assignments
            self.idx_centers, n_false_pos = check_cluster_stability(self, X, eta)
            self.idx_centers_unmerged = self.idx_centers
            if n_false_pos == 0:
                print(" # of stable clusters with noise %.6f : %i" % (eta, self.idx_centers.shape[0]))
                break
        enablePrint()
    """ def get_cluster_info(self, eta = None):
        if eta is None:
            return self.cluster_label, self.idx_centers
        else:
            pos = np.argmin(np.abs(np.array(self.noise_range)-eta))
            #delta_ = self.noise_range[pos]
            #idx_centers = self.hierarchy[pos]['idx_centers']
            cluster_label = self.hierarchy[pos]['cluster_labels']
            idx_center = self.hierarchy[pos]['idx_centers']
            return cluster_label, idx_center """
    """ def update_labels(self, idx_centers, cluster_label):
        self.idx_centers = idx_centers
        self.cluster_label = cluster_label """
    #@profile
    def find_NH_tree_search(self, idx, eta, cluster_label):
        """
        Function for searching for nearest neighbors within some density threshold.

        Parameters
        -----------
        idx : int
            index of the cluster centroid to start from
        eta : float
            density floor of the search (this is "density_center - eta")
        cluster_label: array of int
            cluster label for every datapoint.

        Returns
        -----------
        List of points in the neighborhood of point idx : 1D array
        """
        rho = self.rho
        # boolean membership masks over all samples
        zero_array = np.zeros(len(self.nn_list),dtype=bool)
        nn_list = self.nn_list
        # seed the search with the first search_size neighbors of idx
        zero_array[nn_list[idx, :self.search_size]] = True
        new_leaves = zero_array
        is_NH = (rho > eta) & (new_leaves)
        current_label = cluster_label[idx]
        # This could probably be improved, but at least it's fully vectorized and scalable (NlogN in time and N in memory)
        while True:
            update = False
            leaves=np.copy(new_leaves)
            # only expand through leaves that belong to the starting cluster
            leaves_cluster = (leaves) & (cluster_label == current_label)
            new_leaves=np.zeros(len(self.nn_list), dtype=bool)
            # NOTE(review): this slices the first search_size ROWS of the
            # selected neighbor lists; the v1 implementation below walks the
            # first search_size COLUMNS (i.e. nn_list[...][:, :self.search_size])
            # -- confirm which is intended.
            nn_leaf = np.unique(nn_list[leaves_cluster][:self.search_size].flatten())
            # neighbors not yet part of the neighborhood
            res = nn_leaf[is_NH[nn_leaf]==False]
            # keep only those above the density floor
            pos = np.where(rho[res] > eta)[0]
            if len(pos) > 0: update=True
            is_NH[res[pos]] = True
            new_leaves[res[pos]] = True
            if update is False:
                break
        return np.where(is_NH)[0]
def find_NH_tree_search_v1(self, idx, eta, cluster_label):
"""
Function for searching for nearest neighbors within
some density threshold.
NH should be an empty set for the inital function call.
Note to myself : lots of optimization, this is pretty time consumming !
Returns
-----------
List of points in the neighborhood of point idx : 1D array
"""
rho = self.rho
nn_list = self.nn_list
new_leaves=nn_list[idx][:self.search_size]
is_NH = np.zeros(len(self.nn_list),dtype=np.int)
is_NH[new_leaves[rho[new_leaves] > eta]] = 1
current_label = cluster_label[idx]
# ideally here we cythonize what's below... this is highly ineficient ...
while True:
update = False
leaves=np.hstack(new_leaves)
new_leaves=[]
y_leave = cluster_label[leaves]
leaves_cluster = leaves[y_leave == current_label]
nn_leaf = nn_list[leaves_cluster]
for i in range(1, self.search_size):
res = nn_leaf[is_NH[nn_leaf[:,i]] == 0, i]
pos = np.where(rho[res] > eta)[0]
if len(pos) > 0: update=True
is_NH[res[pos]] = 1
new_leaves.append(res[pos])
if update is False:
break
return np.where(is_NH == 1)[0]
def cout(self, s):
print('[fdc] '+s)
def make_file_name(self):
t_name = "fdc_nhSize=%i_eta=%.3f_ratio=%.2f.pkl"
return t_name%(self.nh_size, self.eta, self.test_ratio_size)
""" def compute_coarse_grain_graph(self):
graph = {}
for idx in self.idx_centers: # at some scale
NH = self.find_NH_tree_search(idx, eta, cluster_label)
label_centers_nn = np.unique([cluster_label[ni] for ni in NH]) """
def display_main_parameters(self):
if self.eta is not 'auto':
eta = "%.3f"%self.eta
else:
eta = self.eta
out = [
"[fdc] {0:<20s}{1:<4s}{2:<6d}".format("nh_size",":",self.nh_size),
"[fdc] {0:<20s}{1:<4s}{2:<6s}".format("eta",":",eta),
"[fdc] {0:<20s}{1:<4s}{2:<6s}".format("merge",":",str(self.merge)),
"[fdc] {0:<20s}{1:<4s}{2:<6d}".format("search_size",":",self.search_size),
"[fdc] {0:<20s}{1:<4s}{2:<6.3f}".format("test_size_ratio",":",self.test_ratio_size)
]
for o in out:
print(o)
def reset(self):
self.bandwidth = None
#####################################################
#####################################################
############ utility functions below ################
#####################################################
#####################################################
def check_cluster_stability(self, X, threshold):
    """
    Given the identified cluster centers, performs a more rigorous
    neighborhood search (based on some noise threshold) for points with higher
    densities.

    This is vaguely similar to watershed cuts in image segmentation and
    basically makes sure we haven't identified spurious cluster centers w.r.t.
    to some noise threshold (false positives). Spurious centers are rerouted
    toward the denser center found in their extended neighborhood.

    Note: `self` is an FDC-like object passed explicitly (this is a
    module-level helper, not a method). Indexing idx_centers with cluster
    labels below is valid because assign_cluster() assigns label j to the
    center at position j of idx_centers.

    This has bad memory complexity, needs improvement if we want to run on
    N>10^5 data points.

    Returns
    -------
    (idx_true_centers, n_false_pos) : stable center indices and the number of
    centers that were merged away in this pass.
    """
    density_graph = self.density_graph
    nn_delta = self.nn_delta
    delta = self.delta
    rho = self.rho
    nn_list = self.nn_list
    idx_centers = self.idx_centers_unmerged
    cluster_label = self.cluster_label
    n_false_pos = 0
    idx_true_centers = []
    for idx in idx_centers:
        rho_center = rho[idx]
        delta_rho = rho_center - threshold
        if threshold < 1e-3: # just check nn_list ...
            NH = nn_list[idx][1:self.search_size]
        else:
            # extended neighborhood: all density-reachable points above delta_rho
            NH = self.find_NH_tree_search(idx, delta_rho, cluster_label)
        label_centers_nn = np.unique(self.cluster_label[NH])
        # densest center among the clusters touched by the neighborhood
        idx_max = idx_centers[ label_centers_nn[np.argmax(rho[idx_centers[label_centers_nn]])] ]
        rho_current = rho[idx]
        if ( rho_current < rho[idx_max] ) & ( idx != idx_max ) :
            # false positive: reroute this center toward the denser one
            nn_delta[idx] = idx_max
            delta[idx] = np.linalg.norm(X[idx_max]-X[idx])
            density_graph[idx_max].append(idx)
            n_false_pos+=1
        else:
            idx_true_centers.append(idx)
    # FIX: np.int was removed from NumPy (>= 1.24); the builtin int is the
    # exact equivalent
    return np.array(idx_true_centers, dtype=int), n_false_pos
def assign_cluster(idx_centers, nn_delta, density_graph):
    """
    Given the cluster centers and the local gradients (nn_delta) assign to every
    point a cluster label.

    The center at position j of idx_centers receives label j; every point
    reachable from it through density_graph inherits that label.
    """
    n_center = idx_centers.shape[0]
    n_sample = nn_delta.shape[0]
    # FIX: np.int was removed from NumPy (>= 1.24); the builtin int is the
    # exact equivalent
    cluster_label = -1*np.ones(n_sample, dtype=int) # reinitialized every time.
    for c, label in zip(idx_centers, range(n_center) ):
        cluster_label[c] = label
        assign_cluster_deep(density_graph[c], cluster_label, density_graph, label)
    return cluster_label
def assign_cluster_deep(root, cluster_label, density_graph, label):
    """
    Assign `label` to every point reachable from `root` through density_graph.

    FIX: the original recursed once per tree level; density-gradient chains can
    be as long as the number of samples, which can exceed Python's recursion
    limit on large data sets. An explicit stack performs the same traversal
    iteratively (visit order differs, final labels are identical).
    """
    stack = list(root)
    while stack:
        child = stack.pop()
        cluster_label[child] = label
        stack.extend(density_graph[child])
def index_greater(array, prec=1e-8):
    """
    Purpose:
        Find the first index whose value exceeds the first element of the array
        by more than `prec`. If no such element exists, returns None.

        FIX: vectorized with NumPy (the original scanned element-by-element
        with np.ndenumerate, which its own comment flagged as slow).
    Precision:
        1e-8
    Return:
        int or None
    """
    hits = np.nonzero(array > array[0] + prec)[0]
    if hits.size:
        return int(hits[0])
    return None
def blockPrint():
    """Blocks printing to screen by redirecting stdout to the null device.

    NOTE(review): each call opens a new devnull handle that enablePrint()
    never closes -- harmless for occasional use, but a handle leak if called
    in a tight loop.
    """
    sys.stdout = open(os.devnull, 'w')
def enablePrint():
    """Enables printing to screen by restoring the original interpreter stdout.

    NOTE(review): the devnull handle installed by blockPrint() is dropped
    without being closed.
    """
    sys.stdout = sys.__stdout__
def chunkIt(length_seq, num):
    """Split the index range [0, length_seq) into `num` contiguous,
    approximately equal [start, end) pairs.

    FIX: the original accumulated the float chunk size (`last += avg`) in a
    while loop; floating-point drift could leave `last` slightly below
    length_seq after num steps, producing num+1 chunks, or let int(last+avg)
    fall short of length_seq. Computing each boundary directly and pinning the
    final end to length_seq guarantees exactly num chunks that cover the whole
    range.
    """
    avg = length_seq / float(num)
    idx_list = []
    for i in range(num):
        start = int(i * avg)
        # the last chunk always ends exactly at length_seq
        end = length_seq if i == num - 1 else int((i + 1) * avg)
        idx_list.append([start, end])
    return idx_list
"Gaussian"
] | edd8fbc860f516658337868398fc5cc0dfd04078c807072111cfe9662feb7f4f |
# to understand whot is done here and why, look at myGayss.md
# about the root of random numbers in Python
# https://github.com/python/cpython/blob/master/Lib/random.py
import os
import random as r
import math
import commonVar as common
#import urllib.request
import zipfile, requests, io
# the myGauss calculation is possible via common.mg=myGauss.myG() in paramenters.property
# then call it with common.mg.myGauss(...)
class myG():
    """Portable Gaussian random generator with optional record/replay support.

    Created via common.mg = myGauss.myG() (see parameters.property) and then
    used as common.mg.myGauss(mu, sigma).
    """
    # When x and y are two variables uniformly distributed on [0, 1), then
    #
    #     cos(2*pi*x)*sqrt(-2*log(1-y))
    #     sin(2*pi*x)*sqrt(-2*log(1-y))
    #
    # are two *independent* standard normal variables (mean 0, std. dev. 1)
    # -- the Box-Muller transform; thanks to Lambert Meertens,
    # https://en.wikipedia.org/wiki/Lambert_Meertens
    # cos and sin produce small differences in their tails on Mac, Linux and
    # Windows, so we cut the extra digits; we do not truncate with
    # floor(x*10**n)*10**-n to avoid subtle division errors (1/10 in base 2).
    def __init__(self):
        self.TWOPI=2.0*math.pi
        self.gauss_next=None
        self.caseList=["1","2","3","4","5","6","7","7b","8","8b","9","9b","10","11"] # book cases
        # NB cases 0a and 0b use another version of SLAPP, in Python2
        self.error=False
        self.link = \
            "https://raw.githubusercontent.com/terna/oligopolyBookCasesGaussianValues/master/"
        # the link above serves zip files whose content is exactly the same as
        # the corresponding .txt file; you can verify by opening 'View raw'
        # for a large file.
        # common.fgOu is set to None by default in commonVar.py.
        # For the cases of the book "Rethinking ..." by Mazzoli, Morini and
        # Terna, the gaussian values used there (generated with random.gauss())
        # were recorded; if those recorded values are missing, they are
        # recalculated anyway with the not-perfectly-portable random.gauss()
        # tool and common.fgOu is set != None to activate the recording branch
        # marked [$] in myGauss() below.
        # Is this a book ASHAM case?
        if common.case in self.caseList: #check if the recorded gaussian values are on disk
            try: #local file existing
                z = zipfile.ZipFile(common.project+"/exampleGauss/"+common.case+".txt.zip")
                common.fgIn=z.open(common.case+".txt")
                print("\nBook case",common.case,"\n",\
                      "\nReading gaussian random values locally.\n")
            except: #if not, check whether recorded gaussian values are online
                # NOTE(review): bare 'except' also swallows KeyboardInterrupt;
                # 'except Exception' would be safer
                try: # online file
                    # NOTE(review): this local 'r' shadows the module alias
                    # 'import random as r' inside __init__ only
                    r = requests.get(self.link+common.case+".txt.zip",\
                                     headers = { "User-Agent":"My new User-Agent"})
                    # "My new User-Agent" is a trick from
                    # http://wolfprojects.altervista.org/articles/change-urllib-user-agent/
                    # unnecessary with github repositories, but does not hurt if left there
                    z = zipfile.ZipFile(io.BytesIO(r.content))
                    z.extractall()
                    common.fgIn=z.open(common.case+".txt")
                    print("\nBook case",common.case,"\n",\
                          "\nReading gaussian random values from:",self.link,"\n")
                except: #data does not exist, we will record it (see [$] in myGauss below)
                    common.fgOu=open(common.project+\
                                     "/exampleGauss/"+common.case+".txt","w")
                    print("\nBook case",common.case,"\n",\
                          "\nWriting gaussian random values to local 'exampleGauss' folder\n")
                    # the final zip operation is done in oActions.py, look for "#closing fgOu"
        else:
            if common.case== "": common.case="'unknown'"
            print("\n\nThe running case "+common.case+" is outside book (Mazzoli et al. 2019) cases\n"\
                  "Neither using past random gaussian values nor saving the new generated ones\n")
    # internal method
    # The result below is the same on any operating system: recorded values
    # from previous runs are replayed elsewhere (myGauss), while new values
    # generated here by myGauss0 are stable across operating systems.
    def myGauss0(self,mu, sigma):
        """Generate one N(mu, sigma) value portably (Box-Muller, cosine branch).

        Intermediate cos/sqrt values are formatted to 20 decimals and cut to
        12 characters so that platform-specific rounding in the last digits
        cannot change the result.

        NOTE(review): gauss_next is initialized to None and never reassigned
        to a value anywhere in this class, so the caching branch below is
        vestigial (z is always None here) and the sine variate (mySin) is
        computed but unused.
        """
        z = self.gauss_next
        self.gauss_next=None
        if z is None:
            x2pi = r.random() * self.TWOPI
            g2rad = math.sqrt(-2.0 * math.log(1.0 - r.random()))
            g2rad=('%1.20f' % g2rad) # converts also exponent 'e'
                                     # with an extra number of digits
            g2rad=float(g2rad[0:12]) # cutting 'dangerous' digits (rounding
                                     # effects and different tails on Mac or Linux)
            myCos=('%1.20f' % math.cos(x2pi)) # converts also exponent 'e'
            mySin=('%1.20f' % math.sin(x2pi)) # with an extra number of digits
            myCos=float(myCos[0:12]) # cutting 'dangerous' digits (rounding
            mySin=float(mySin[0:12]) # effects and different tails on Mac or Linux)
            z = myCos * g2rad
        return mu + z*sigma
# use this method
def myGauss(self,mu, sigma):
# new cases
if common.fgIn == None and common.fgOu==None:
return self.myGauss0(mu, sigma)
# cases of the book
if common.fgIn != None:
g=float(common.fgIn.readline())
r.gauss(mu, sigma) # void destination, the call is made to keep safe
# the original sequence of random number generation
return g
# [$] - see above the comment with [$] inside
if common.fgOu != None: # [$] see comment above
g=r.gauss(mu, sigma)
print(g,file=common.fgOu) #the close() is in oActions.py
return g
| terna/SLAPP3 | 6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/oligopoly/myGauss.py | Python | cc0-1.0 | 5,913 | [
"Gaussian"
] | 415810dcc5bab419613056ca55c67edbea4a10093f3193db20570233fbfbd61e |
#!/usr/bin/python
import bvpl_octree_batch;
import multiprocessing
import Queue
import time
import os;
import optparse;
from xml.etree.ElementTree import ElementTree
import sys
class dbvalue:
    """Lightweight handle pairing a bvpl batch-database output id with its type string."""
    def __init__(self, index, type):
        self.id = index # unsigned integer
        self.type = type # string
class gauss_job():
    """Work item describing one Gaussian-kernel run on a single scene block."""
    def __init__(self,scene, kernel , block_i, block_j, block_k, output_path, cell_length):
        self.scene = scene;             # dbvalue handle of the scene
        self.kernel = kernel;           # dbvalue handle of the gauss3d kernel
        self.block_i = block_i;         # block coordinates within the
        self.block_j = block_j;         # scene's block grid
        self.block_k = block_k;
        self.output_path = output_path; # directory for the response scene
        self.cell_length = cell_length; # finest cell length of the scene
def execute_jobs(jobs, num_procs=5):
    """Run the given gauss_job items on num_procs worker processes.

    Blocks until every job has posted a result; returns the list of results
    (one entry per job, order not guaranteed).
    """
    work_queue=multiprocessing.Queue();
    result_queue=multiprocessing.Queue();
    for job in jobs:
        work_queue.put(job)
    for i in range(num_procs):
        worker= gauss_worker(work_queue,result_queue)
        worker.start();
        print("worker with name ",worker.name," started!")
    # collect the results off the queue
    #important: having a result queue makes the execute_jobs wait for all jobs in the queue before exiting
    results = []
    while len(results) < len(jobs):
        result = result_queue.get()
        results.append(result)
    return results
class gauss_worker(multiprocessing.Process):
    """Worker process: pops gauss_job items off a queue and runs the bvpl
    block-kernel operator for each, posting an exit code per job."""
    def __init__(self,work_queue,result_queue):
        # base class initialization
        multiprocessing.Process.__init__(self)
        # job management stuff
        self.work_queue = work_queue
        self.result_queue = result_queue
        self.kill_received = False
    def run(self):
        while not self.kill_received:
            # get a task; an empty queue means all jobs are taken -> exit
            try:
                job = self.work_queue.get_nowait()
            except Queue.Empty:
                break
            start_time = time.time();
            # redirect this process's batch output to a per-pid log file
            bvpl_octree_batch.set_stdout('logs/log_' + str(os.getpid())+ ".txt");
            print("Running Kernel");
            bvpl_octree_batch.init_process("bvplBlockKernelOperatorProcess");
            bvpl_octree_batch.set_input_from_db(0,job.scene);
            bvpl_octree_batch.set_input_from_db(1,job.kernel);
            bvpl_octree_batch.set_input_int(2, job.block_i);
            bvpl_octree_batch.set_input_int(3, job.block_j)
            bvpl_octree_batch.set_input_int(4, job.block_k)
            bvpl_octree_batch.set_input_string(5,"algebraic");
            bvpl_octree_batch.set_input_string(6, job.output_path);
            bvpl_octree_batch.set_input_double(7, job.cell_length);
            bvpl_octree_batch.run_process();
            print ("Runing time for worker:", self.name)
            print(time.time() - start_time);
            #free memory
            bvpl_octree_batch.reset_stdout();
            bvpl_octree_batch.clear();
            #output exit code in this case
            #important: having a result queue makes the execute_jobs wait for all jobs in the queue before exiting
            self.result_queue.put(0);
def parse_scene(scene_file, blocks):
    """Parse a boxm scene XML file and append its block-grid dimensions to `blocks`.

    Appends the integer x, y and z dimensions (in that order). Prints an error
    and exits the process when the file is malformed.

    FIX: Python 2 'print' statements converted to print() calls with a single
    string argument, which is valid in both Python 2 and Python 3.
    """
    # parse xml file
    tree = ElementTree()
    tree.parse(scene_file)
    # find the <blocks> element holding the grid dimensions
    blocks_elm = tree.getroot().find('blocks')
    if blocks_elm is None:
        print("Error parsing boxm scene: No blocks_elm")
        sys.exit(-1)
    x = blocks_elm.get('x_dimension')
    y = blocks_elm.get('y_dimension')
    z = blocks_elm.get('z_dimension')
    if x is None or y is None or z is None:
        print("Error parsing boxm scene: Incorrect dimensions")
        sys.exit(-1)
    blocks.append(int(x))
    blocks.append(int(y))
    blocks.append(int(z))
#function to parse an xml file containing the model directory and the xml file name (without the path)
def parse_scenes_info(scenes_info_file, model_dirs, output_dirs, lengths, scene_blocks):
    """Parse the scenes-info XML and fill the four output lists in place.

    For every <scene> element: its path / output_dir / cell_length attributes
    are appended to model_dirs / output_dirs / lengths, and its block-grid
    dimensions (prefixed with the scene index) are appended to scene_blocks.
    Prints an error and exits the process on a malformed file.

    FIX: Python 2 'print' statements converted to print() calls with a single
    string argument, which is valid in both Python 2 and Python 3.
    """
    print('Parsing: ' + scenes_info_file)
    # parse xml file
    bof_tree = ElementTree()
    bof_tree.parse(scenes_info_file)
    scenes_elm = bof_tree.getroot().findall('scene')
    if scenes_elm is None:
        print("Invalid bof info file: No scenes element")
        sys.exit(-1)
    # find scene paths
    for s in range(0, len(scenes_elm)):
        path = scenes_elm[s].get("path")
        cell_length = scenes_elm[s].get("cell_length")
        output_dir = scenes_elm[s].get("output_dir")
        if path is None:
            print("Invalid info file: Error parsing scene path")
            sys.exit(-1)
        if output_dir is None:
            print("Invalid info file: Error parsing output_dir")
            sys.exit(-1)
        if cell_length is None:
            print("Invalid info file: Error parsing cell_length")
            sys.exit(-1)
        model_dirs.append(path)
        output_dirs.append(output_dir)
        lengths.append(float(cell_length))
        blocks = []
        blocks.append(s)  # scene index first, then the x/y/z grid dimensions
        parse_scene(path, blocks)
        scene_blocks.append(blocks)
#*********************The Main Algorithm ****************************#
# Driver: builds an isotropic 3-D Gaussian kernel once, then for each scene
# listed in --scenes_info enqueues one gauss_job per scene block, runs them on
# --num_cores worker processes, and saves the resulting response scene.
if __name__=="__main__":
    bvpl_octree_batch.register_processes();
    bvpl_octree_batch.register_datatypes();
    # NOTE(review): this shadows the module-level dbvalue with an identical
    # local definition
    class dbvalue:
        def __init__(self, index, type):
            self.id = index # unsigned integer
            self.type = type # string
    #Parse inputs
    parser = optparse.OptionParser(description='Compute Expected Color Scene');
    parser.add_option('--scenes_info', action="store", dest="scenes_info", type="string", default="");
    parser.add_option('--num_cores', action="store", dest="num_cores", type="int", default=4);
    parser.add_option('--sigma', action="store", dest="sigma", type="float", default=1.0);
    options, args = parser.parse_args()
    scenes_info = options.scenes_info;
    num_cores = options.num_cores;
    sigma = options.sigma;
    model_dirs = [];
    output_dirs = [];
    lengths=[];
    scene_blocks =[];
    parse_scenes_info(scenes_info, model_dirs, output_dirs, lengths, scene_blocks);
    #Begin multiprocessing
    job_list=[];
    start_time = time.time();
    print("Creating Gauss kernel");
    bvpl_octree_batch.init_process("bvpl_create_gauss3d_kernel_process");
    bvpl_octree_batch.set_input_float(0,sigma);
    bvpl_octree_batch.set_input_float(1,sigma);
    bvpl_octree_batch.set_input_float(2,sigma);
    bvpl_octree_batch.set_input_float(3,1.0); #axis of rotation - irrelevant for isotropic gaussian
    bvpl_octree_batch.set_input_float(4,0.0);
    bvpl_octree_batch.set_input_float(5,0.0);
    bvpl_octree_batch.set_input_float(6,0.0); #rotation about axis of rotation ;)
    bvpl_octree_batch.run_process();
    (kernel_id,kernel_type)= bvpl_octree_batch.commit_output(0);
    kernel = dbvalue(kernel_id,kernel_type);
    #Enqueue jobs
    if(len(model_dirs)==len(output_dirs)==len(lengths)==len(scene_blocks) ):
        # NOTE(review): range starts at 14, silently skipping the first 14
        # scenes -- presumably a resume hack; confirm before reuse
        for scene_id in range (14, len(scene_blocks)):
            if not os.path.isdir(output_dirs[scene_id] +"/"):
                os.mkdir(output_dirs[scene_id] +"/");
            if not os.path.isdir(output_dirs[scene_id] +"/drishti/"):
                os.mkdir(output_dirs[scene_id] +"/drishti/");
            print("Creating a Scene");
            bvpl_octree_batch.init_process("boxmCreateSceneProcess");
            bvpl_octree_batch.set_input_string(0, model_dirs[scene_id]);
            bvpl_octree_batch.run_process();
            (id, type) = bvpl_octree_batch.commit_output(0);
            scene= dbvalue(id, type);
            # scene_blocks entries are [scene_index, x_dim, y_dim, z_dim]
            # (built by parse_scenes_info), hence the 1..3 indexing below
            nblocks = scene_blocks[scene_id];
            print nblocks;
            for block_i in range (0, nblocks[1]):
                for block_j in range (0, nblocks[2]):
                    for block_k in range (0, nblocks[3]):
                        current_job = gauss_job(scene, kernel , block_i, block_j, block_k, output_dirs[scene_id], lengths[scene_id]);
                        job_list.append(current_job);
            results = execute_jobs(job_list, num_cores);
            print results;
            print("Creating a Scene");
            bvpl_octree_batch.init_process("boxmCreateSceneProcess");
            bvpl_octree_batch.set_input_string(0, output_dirs[scene_id] +"/float_response_scene.xml");
            bvpl_octree_batch.run_process();
            (id, type) = bvpl_octree_batch.commit_output(0);
            scene= dbvalue(id, type);
            print("Save Scene");
            bvpl_octree_batch.init_process("boxmSaveSceneRawProcess");
            bvpl_octree_batch.set_input_from_db(0,scene);
            bvpl_octree_batch.set_input_string(1, output_dirs[scene_id] + "/drishti/gauss_scene");
            bvpl_octree_batch.set_input_unsigned(2,0);
            bvpl_octree_batch.set_input_unsigned(3,1);
            bvpl_octree_batch.run_process();
    print ("Total running time: ");
    print(time.time() - start_time);
"Gaussian"
] | cab273433283780a8e6c5a7ba5f282e53b8d58e95680fac10ed641b6550a5a58 |
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
def getPlotLog(d, log, dmax=200):
    """Expand a layer log into step-plot (staircase) coordinates.

    Each depth/value pair is duplicated so the log plots as a staircase; the
    depth staircase is shifted by one entry so each segment spans its layer,
    with the final segment ending at dmax.
    """
    d = np.asarray(d, dtype=float)
    log = np.asarray(log, dtype=float)
    dplot = np.repeat(d, 2)
    logplot = np.repeat(log, 2)
    dplot = np.concatenate((dplot[1:], [float(dmax)]))
    return dplot, logplot
def getImpedance(rholog, vlog):
    """
    Acoustic impedance is the product of density and velocity:

    $$
    Z = \\rho v
    $$
    """
    rho_arr = np.asarray(rholog, dtype=float)
    v_arr = np.asarray(vlog, dtype=float)
    return rho_arr * v_arr
def getReflectivity(d, rho, v, usingT=True):
    """
    Compute the reflectivity series of a layered model.

    The reflection coefficient of interface i is

    $$
    R_i = \\frac{Z_{i+1} - Z_{i}}{Z_{i+1}+Z_{i}}
    $$

    and, when transmission through the layers above is included,

    $$
    \\text{reflectivity}_i = R_i \\prod_{j=1}^{i-1}(1-R_j^2)
    $$

    Parameters: d is accepted for signature compatibility but unused; rho and
    v are the per-layer densities and velocities.
    Returns (rseries, R): the (optionally transmission-corrected) reflectivity
    series and the raw interface reflection coefficients.
    """
    # acoustic impedance of each layer (Z = rho * v, inlined so this function
    # is self-contained)
    Z = np.asarray(rho, dtype=float) * np.asarray(v, dtype=float)
    dZ = (Z[1:] - Z[:-1])
    sZ = (Z[:-1] + Z[1:])
    R = dZ/sZ # raw reflection coefficients, one per interface
    nlayer = len(v) # number of layers
    # FIX: the original did `rseries = R`, aliasing the two arrays; scaling
    # rseries in place then also rewrote R, so later (1 - R_j^2) transmission
    # factors used already-scaled coefficients and the returned R was no
    # longer the raw coefficients. Copy first so R stays intact.
    rseries = R.copy()
    if usingT:
        for i in range(nlayer-1):
            rseries[i+1:] = rseries[i+1:]*(1.-R[i]**2)
    return rseries, R
def getTimeDepth(d, v, dmax=200):
    """
    Convert depths to two-way travel times.

    Returns the sorted depths (with ``dmax`` appended as the model bottom)
    together with the cumulative two-way travel time from the surface to
    the top of each layer.
    """
    depths = np.append(np.sort(d), dmax)
    # Two-way time spent crossing each layer, then accumulate from zero
    # at the surface.
    layer_times = 2. * np.diff(depths) / v
    twttop = np.cumsum(np.append(0., layer_times))
    return depths, twttop
def getLogs(d, rho, v, usingT=True):
    """
    Convenience wrapper that assembles every plottable log at once.

    Returns the plotting depths plus the density, velocity and impedance
    logs, and the reflectivity series.
    """
    dpth, rholog = getPlotLog(d, rho)
    vlog = getPlotLog(d, v)[1]
    zlog = getImpedance(rholog, vlog)
    rseries = getReflectivity(d, rho, v, usingT)[0]
    return dpth, rholog, vlog, zlog, rseries
def syntheticSeismogram(d, rho, v, wavf, wavA=1., usingT=True, wavtyp = 'RICKER', dt=0.0001, dmax=200):
    """
    Generate a synthetic seismogram for a simple 1-D layered model.

    Inputs:
       d      : depth to the top of each layer (m); the last layer is
                assumed to be a half-space
       rho    : density of each layer (kg/m^3)
       v      : velocity of each layer (m/s)
       wavf   : wavelet frequency; Ricker takes one frequency, Ormsby
                takes 4, Klauder takes 2
       wavA   : wavelet amplitude
       usingT : include transmission coefficients in the reflectivity?
       wavtyp : type of wavelet: 'RICKER', 'ORMSBY' or 'KLAUDER'
       dt     : sample interval (s)
       dmax   : maximum model depth (m)

    Returns:
       (tseis, seis, twav, wav, tref, rseries)

    Lindsey Heagy
    lheagy@eos.ubc.ca
    Created:  November 30, 2013
    Modified: October 3, 2014
    """
    v, rho, d = np.array(v, dtype=float), np.array(rho, dtype=float), np.array(d, dtype=float)
    usingT = np.array(usingT, dtype=bool)
    _, t = getTimeDepth(d, v, dmax)
    # BUG FIX: usingT was previously dropped here (getReflectivity was called
    # with its default); forward the caller's choice.
    rseries, R = getReflectivity(d, rho, v, usingT)
    # two-way times of the reflectors (drop the surface and the model bottom)
    tref = t[1:-1]
    # regular time axis for the seismogram
    t = np.arange(t.min(), t.max(), dt)
    # wavelet time axis: two dominant periods on either side of zero
    twav = np.arange(-2.0/np.min(wavf), 2.0/np.min(wavf), dt)
    # dispatch on wavelet type to build the source wavelet
    wav = {'RICKER': getRicker, 'ORMSBY': getOrmsby, 'KLAUDER': getKlauder}[wavtyp](wavf, twav)
    wav = wavA*wav
    # spike series: each reflection coefficient at its nearest time sample
    rseriesconv = np.zeros(len(t))
    for i in range(len(tref)):
        index = np.abs(t - tref[i]).argmin()
        rseriesconv[index] = rseries[i]
    # convolve the spikes with the wavelet and trim to 0 <= t <= max(t)
    seis = np.convolve(wav, rseriesconv)
    tseis = np.min(twav) + dt*np.arange(len(seis))
    index = np.logical_and(tseis >= 0, tseis <= np.max(t))
    tseis = tseis[index]
    seis = seis[index]
    return tseis, seis, twav, wav, tref, rseries
## WAVELET DEFINITIONS
# Module-level shorthand used by the wavelet functions below.
pi = np.pi
def getRicker(f, t):
    """
    Ricker wavelet with center frequency ``f`` evaluated at times ``t``.

    See: http://www.subsurfwiki.org/wiki/Ricker_wavelet
    """
    pift = np.pi * f * t
    pift2 = pift ** 2
    return (1 - 2 * pift2) * np.exp(-pift2)
# def getGauss(f,t):
# assert len(f) == 1, 'Gauss wavelet needs 1 frequency as input'
# f = f[0]
def getOrmsby(f, t):
    """
    Ormsby wavelet with low-cut frequency f[0], low-pass frequency f[1],
    high-pass frequency f[2] and high-cut frequency f[3].

    See: http://www.subsurfwiki.org/wiki/Ormsby_filter
    """
    assert len(f) == 4, 'Ormsby wavelet needs 4 frequencies as input'
    f = np.sort(f)  # Ormsby wavelet frequencies must be in increasing order
    pif = np.pi*f
    den1 = pif[3] - pif[2]
    den2 = pif[1] - pif[0]
    # BUG FIX: the second sinc of each term previously dropped the time
    # argument (np.sinc(pif[2]) instead of np.sinc(pif[2]*t)), evaluating it
    # at a constant and producing a wrong wavelet for all t.
    term1 = (pif[3]*np.sinc(pif[3]*t))**2 - (pif[2]*np.sinc(pif[2]*t))**2
    term2 = (pif[1]*np.sinc(pif[1]*t))**2 - (pif[0]*np.sinc(pif[0]*t))**2
    wav = term1/den1 - term2/den2
    return wav
def getKlauder(f, t, T=5.0):
    """
    Klauder wavelet with upper frequency f[0] and lower frequency f[1].

    ``T`` is the duration (s) of the underlying linear sweep.
    """
    assert len(f) == 2, 'Klauder wavelet needs 2 frequencies as input'
    k = np.diff(f)/T      # sweep rate
    f0 = np.sum(f)/2.0    # carrier (midpoint) frequency
    # Original expression: sin(pi*k*t*(T-t))/(pi*k*t) * exp(2*pi*1j*f0*t).
    # BUG FIX: it divided by t directly, yielding NaN at t = 0.  Rewriting
    # with np.sinc (sinc(x) = sin(pi x)/(pi x)) is algebraically identical
    # for t != 0 and returns the analytic limit T at t = 0.
    wav = np.real((T - t)*np.sinc(k*t*(T - t))*np.exp(2*np.pi*1j*f0*t))
    return wav
## Plotting Functions
def plotLogFormat(log, dpth,xlim, col='blue'):
    """
    Nice formatting for plotting logs as a function of depth.

    Draws ``log`` against ``dpth`` with the given x limits and color and
    returns the list of Line2D artists created by ``plt.plot``.
    """
    ax = plt.plot(log,dpth,linewidth=2,color=col)
    plt.xlim(xlim)
    plt.ylim((dpth.min(),dpth.max()))
    plt.grid()
    # Depth increases downward on the page.
    plt.gca().invert_yaxis()
    # Rotate x tick labels so long numbers do not overlap.
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    return ax
def plotLogs(d, rho, v, usingT=True):
    """
    Plotting wrapper to plot density, velocity, acoustic impedance and
    reflectivity as a function of depth (four side-by-side panels).
    """
    d = np.sort(d)
    dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)
    nd = len(dpth)
    # Shared axis limits (density and velocity are shown in units of 10^3).
    xlimrho = (1.95,5.05)
    xlimv = (0.25,4.05)
    xlimz = (xlimrho[0]*xlimv[0], xlimrho[1]*xlimv[1])
    # Plot Density
    plt.figure(1)
    plt.subplot(141)
    plotLogFormat(rholog*10**-3,dpth,xlimrho,'blue')
    plt.title('$\\rho$')
    plt.xlabel('Density \n $\\times 10^3$ (kg /m$^3$)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)
    # Plot Velocity
    plt.subplot(142)
    plotLogFormat(vlog*10**-3,dpth,xlimv,'red')
    plt.title('$v$')
    plt.xlabel('Velocity \n $\\times 10^3$ (m/s)',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)
    # Plot Acoustic Impedance
    plt.subplot(143)
    plotLogFormat(zlog*10.**-6.,dpth,xlimz,'green')
    plt.gca().set_title('$Z = \\rho v$')
    plt.gca().set_xlabel('Impedance \n $\\times 10^{6}$ (kg m$^{-2}$ s$^{-1}$)',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)
    # Plot Reflectivity spikes at each interface depth
    plt.subplot(144)
    # NOTE(review): np.zeros(nd-1) is longer than d[1:] and rseries
    # (nd = len(dpth) = 2*len(d)); this presumably relies on lenient
    # matplotlib broadcasting -- confirm against the matplotlib version used.
    plt.hlines(d[1:],np.zeros(nd-1),rseries,linewidth=2)
    plt.plot(np.zeros(nd),dpth,linewidth=2,color='black')
    plt.title('Reflectivity');
    plt.xlim((-1.,1.))
    plt.gca().set_xlabel('Reflectivity')
    plt.grid()
    plt.gca().invert_yaxis()
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)
    plt.tight_layout()
    plt.show()
def plotTimeDepth(d,v):
    """
    Wrapper to plot the time-depth conversion curve implied by the
    provided velocity model.
    """
    # Two-way travel time to the top of each layer (plus the model bottom).
    dpth,t = getTimeDepth(d,v)
    plt.figure()
    plt.plot(dpth,t,linewidth=2);
    plt.title('Depth-Time');
    plt.grid()
    plt.gca().set_xlabel('Depth (m)',fontsize=9)
    plt.gca().set_ylabel('Two Way Time (s)',fontsize=9)
    plt.tight_layout()
    plt.show()
def plotSeismogram(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):
    """
    Plot the wavelet, reflectivity series and seismogram as functions of
    time for the given geologic model (depths, densities, velocities).

    ``noise`` is the amplitude of additive band-limited noise, expressed
    as a fraction of the maximum seismogram amplitude.
    """
    tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)
    # White noise scaled to the peak amplitude...
    noise  = noise*np.max(np.abs(seis))*np.random.randn(seis.size)
    # ...smoothed with a triangular filter to band-limit it.
    filt   = np.arange(1.,15.)
    filtr  = filt[::-1]
    filt   = np.append(filt,filtr[1:])*1./15.
    noise  = np.convolve(noise,filt)
    noise  = noise[0:seis.size]
    seis   = seis + noise
    plt.figure()
    # Panel 1: source wavelet
    plt.subplot(131)
    plt.plot(wav,twav,linewidth=1,color='black')
    plt.title('Wavelet')
    plt.xlim((-2.,2.))
    plt.grid()
    plt.gca().invert_yaxis()
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)
    # Panel 2: reflectivity spikes in time
    plt.subplot(132)
    plt.plot(np.zeros(tref.size),(tseis.max(),tseis.min()),linewidth=2,color='black')
    plt.hlines(tref,np.zeros(len(rseriesconv)),rseriesconv,linewidth=2) #,'marker','none'
    plt.title('Reflectivity')
    plt.grid()
    plt.ylim((0,tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-1.,1.))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)
    # Panel 3: the (optionally noisy) seismogram
    plt.subplot(133)
    plt.plot(seis,tseis,color='black',linewidth=1)
    plt.title('Seismogram')
    plt.grid()
    plt.ylim((tseis.min(),tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-0.95,0.95))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)
    plt.tight_layout()
    plt.show()
def plotSeismogramV2(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):
    """
    Plot the physical property logs (in depth) side by side with the
    seismogram (in time).

    ``noise`` is the amplitude of additive band-limited noise, expressed
    as a fraction of the maximum seismogram amplitude.
    """
    # zlog and rseries are computed but only rholog/vlog/dpth are plotted.
    dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)
    tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)
    # White noise scaled to the peak amplitude, smoothed by a triangular
    # filter to band-limit it.
    noise = noise*np.max(np.abs(seis))*np.random.randn(seis.size)
    filt  = np.arange(1.,21.)
    filtr = filt[::-1]
    filt  = np.append(filt,filtr[1:])*1./21.
    noise = np.convolve(noise,filt)
    noise = noise[0:seis.size]
    xlimrho = (1.95,5.05)
    xlimv = (0.25,4.05)
    xlimz = (xlimrho[0]*xlimv[0], xlimrho[1]*xlimv[1])
    seis = seis + noise
    plt.figure()
    # Panel 1: density log
    plt.subplot(131)
    plotLogFormat(rholog*10**-3,dpth,xlimrho,'blue')
    plt.title('$\\rho$')
    plt.xlabel('Density \n $\\times 10^3$ (kg /m$^3$)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)
    # Panel 2: velocity log
    plt.subplot(132)
    plotLogFormat(vlog*10**-3,dpth,xlimv,'red')
    plt.title('$v$')
    plt.xlabel('Velocity \n $\\times 10^3$ (m/s)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)
    # Panel 3: seismogram in time
    plt.subplot(133)
    plt.plot(seis,tseis,color='black',linewidth=1)
    plt.title('Seismogram')
    plt.grid()
    plt.ylim((tseis.min(),tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-0.5,0.5))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)
    plt.tight_layout()
    plt.show()
## INTERACTIVE PLOT WRAPPERS
def plotLogsInteract(d2,d3,rho1,rho2,rho3,v1,v2,v3,usingT=False):
    """
    Interactive wrapper of plotLogs: builds the three-layer model arrays
    from the individual slider values and plots them.
    """
    layer_tops = np.array((0., d2, d3), dtype=float)
    densities = np.array((rho1, rho2, rho3), dtype=float)
    velocities = np.array((v1, v2, v3), dtype=float)
    plotLogs(layer_tops, densities, velocities, usingT)
def plotTimeDepthInteract(d2,d3,v1,v2,v3):
    """
    Interactive wrapper for plotTimeDepth: assembles the three-layer
    model from the individual slider values.
    """
    layer_tops = np.array((0., d2, d3), dtype=float)
    layer_velocities = np.array((v1, v2, v3), dtype=float)
    plotTimeDepth(layer_tops, layer_velocities)
def plotSeismogramInteractFixMod(wavf,wavA):
    """
    Interactive wrapper for plotSeismogram with a fixed three-layer
    geologic model; only the wavelet frequency and amplitude vary.
    """
    d = [0., 50., 100.]          # depth to the top of each layer (m)
    v = [500., 1000., 1500.]     # velocity of each layer (m/s)
    rho = [2000., 2300., 2500.]  # density of each layer (kg/m^3)
    plotSeismogram(d, rho, v, np.array(wavf, dtype=float), wavA, 0., True)
def plotSeismogramInteract(d2,d3,rho1,rho2,rho3,v1,v2,v3,wavf,wavA,AddNoise=False,usingT=True):
    """
    Interactive wrapper for plotSeismogramV2.

    BUG FIX: the density and velocity arguments (rho1..rho3, v1..v3) were
    previously accepted but ignored in favor of a hard-coded model; the
    caller's values are now used, matching the signature's intent.
    """
    d = np.array((0., d2, d3), dtype=float)
    rho = np.array((rho1, rho2, rho3), dtype=float)
    v = np.array((v1, v2, v3), dtype=float)
    # 2% of peak amplitude of band-limited noise when requested.
    noise = 0.02 if AddNoise else 0.
    plotSeismogramV2(d, rho, v, wavf, wavA, noise, usingT)
def plotSeismogramInteractRes(h2,wavf,AddNoise=False):
    """
    Interactive wrapper for plotSeismogramV2 with a fixed model whose
    middle layer has adjustable thickness ``h2``.
    """
    layer_tops = [0., 50., 50. + h2]     # depth to the top of each layer (m)
    velocities = [500., 1000., 1500.]    # velocity of each layer (m/s)
    densities = [2000., 2300., 2500.]    # density of each layer (kg/m^3)
    freq = np.array(wavf, dtype=float)
    noise = 0.02 if AddNoise else 0.
    # Transmission coefficients are left at the plotSeismogramV2 default.
    plotSeismogramV2(layer_tops, densities, velocities, freq, 1., noise)
if __name__ == '__main__':
    # Example three-layer model for quick manual checks; the demo plotting
    # calls are left commented out.
    d = [0., 50., 100.]      # Position of top of each layer (m)
    v = [500., 1000., 1500.]   # Velocity of each layer (m/s)
    rho = [2000., 2300., 2500.] # Density of each layer (kg/m^3)
    wavtyp = 'RICKER'          # Wavelet type
    wavf = 50.                 # Wavelet Frequency
    usingT = False             # Use Transmission Coefficients?
    # NOTE(review): the commented plotSeismogram call below passes wavtyp
    # positionally where wavf is expected -- fix before re-enabling.
    #plotLogsInteract(d[1],d[2],rho[0],rho[1],rho[2],v[0],v[1],v[2])
    #plotTimeDepth(d,v)
    #plotSeismogram(d, rho, v, wavtyp, wavf, usingT)
    #plotSeismogramV2(d, rho, v, 50., wavA=1., noise = 0., usingT=True, wavtyp='RICKER')
| jaabell/Seismogram | syntheticSeismogram.py | Python | mit | 14,041 | [
"Gaussian"
] | 66be24d192560f10066eadbc34edb86a9ce8316cae77703bca960ee0f5d9052f |
"""Copyright 2008 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.4'
__date__ = 'May 02 2008'
from threading import *
from ctypes import *
from Phidgets.Phidget import *
from Phidgets.PhidgetException import *
import sys
class TextLCD(Phidget):
    """This class represents a Phidget Text LCD.

    All methods to control the Text LCD are implemented in this class.
    The TextLCD Phidget consists of a Vacuum Fluorescent display that is
    capable of displaying Standard as well as custom characters in multiple rows.

    Extends:
        Phidget
    """
    def __init__(self):
        """The Constructor Method for the TextLCD Class"""
        Phidget.__init__(self)
        # Allocate the native TextLCD handle driven by this wrapper.
        self.dll.CPhidgetTextLCD_create(byref(self.handle))

    def __check(self, result):
        """Raise a PhidgetException if a native call returned an error code.

        Every CPhidgetTextLCD_* function returns 0 on success and a
        positive error code on failure; this centralizes the check that
        was previously repeated in each method.
        """
        if result > 0:
            raise PhidgetException(result)

    def getRowCount(self):
        """Returns the number of rows available on the display.

        Returns:
            The number of rows <int>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        rowCount = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getRowCount(self.handle, byref(rowCount)))
        return rowCount.value

    def getColumnCount(self):
        """Returns the number of columns (characters per row) available.

        This value is the same for every row.

        Returns:
            The number of columns <int>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        columnCount = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getColumnCount(self.handle, byref(columnCount)))
        return columnCount.value

    def getBacklight(self):
        """Returns the status of the backlight.

        True indicates that the backlight is on, False that it is off.
        The backlight is turned on by default.

        Returns:
            The status of the backlight <boolean>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        backlightStatus = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getBacklight(self.handle, byref(backlightStatus)))
        return backlightStatus.value == 1

    def setBacklight(self, state):
        """Sets the status of the backlight.

        A true value turns the backlight on, a false value turns it off.
        (BUG FIX: the original compared `state == True`, so truthy values
        other than the exact object True -- e.g. 1.5 or "on" -- silently
        turned the backlight OFF.)

        Parameters:
            state<boolean>: the desired backlight state.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        value = 1 if state else 0
        self.__check(self.dll.CPhidgetTextLCD_setBacklight(self.handle, c_int(value)))

    def getContrast(self):
        """Returns the contrast of the display.

        This is the contrast of the entire display.

        Returns:
            The current contrast setting <int>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        contrast = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getContrast(self.handle, byref(contrast)))
        return contrast.value

    def setContrast(self, value):
        """Sets the contrast of the display.

        The valid range is 0-255.  Changing the contrast can increase the
        readability of the display in certain viewing situations, such as
        at an odd angle.

        Parameters:
            value<int>: the desired contrast value.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        self.__check(self.dll.CPhidgetTextLCD_setContrast(self.handle, c_int(value)))

    def getCursor(self):
        """Returns the status of the cursor.

        True indicates that the cursor is on, False that it is off.
        The cursor is an underscore which appears directly to the right of
        the last entered character on the display.  It is disabled by default.

        Returns:
            The status of the cursor <boolean>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        cursorStatus = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getCursorOn(self.handle, byref(cursorStatus)))
        return cursorStatus.value == 1

    def setCursor(self, state):
        """Sets the state of the cursor.

        A true value turns the cursor on, a false value turns it off.
        The cursor is an underscore which appears directly to the right of
        the last entered character on the display.  It is disabled by default.

        Parameters:
            state<boolean>: the desired cursor state.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        value = 1 if state else 0
        self.__check(self.dll.CPhidgetTextLCD_setCursorOn(self.handle, c_int(value)))

    def getCursorBlink(self):
        """Returns the status of the cursor blink.

        True indicates that the cursor blink is on, False that it is off.
        The cursor blink is a flashing box which appears directly to the
        right of the last entered character, in the same spot as the cursor
        if it is enabled.  It is disabled by default.

        Returns:
            The current status of the cursor blink <boolean>.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        cursorBlinkStatus = c_int()
        self.__check(self.dll.CPhidgetTextLCD_getCursorBlink(self.handle, byref(cursorBlinkStatus)))
        return cursorBlinkStatus.value == 1

    def setCursorBlink(self, state):
        """Sets the state of the cursor blink.

        A true value turns the cursor blink on, a false value turns it off.
        The cursor blink is a flashing box which appears directly to the
        right of the last entered character, in the same spot as the cursor
        if it is enabled.  It is disabled by default.

        Parameters:
            state<boolean>: the desired state of the cursor blink.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached.
        """
        value = 1 if state else 0
        self.__check(self.dll.CPhidgetTextLCD_setCursorBlink(self.handle, c_int(value)))

    def setDisplayString(self, index, string):
        """Sets the display string of a certain row.

        If the string is longer than the row, it will be truncated.

        Parameters:
            index<int>: the index of the row to write the string to.
            string<string>: the string to display.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached,
            or if the row index is invalid.
        """
        # NOTE(review): c_char_p expects bytes under Python 3; this module
        # targets Python 2 where str is bytes -- confirm before porting.
        self.__check(self.dll.CPhidgetTextLCD_setDisplayString(self.handle, c_int(index), c_char_p(string)))

    def setCustomCharacter(self, index, part1, part2):
        """Sets a custom character.

        You can set up to 8 (0-7) custom characters, each one completely
        defined by two integers; they are stored in the character display
        until power is removed, whence they must be re-programmed.
        See TextLCD-simple.py for an example of how this works.

        Parameters:
            index<int>: custom character list index.
            part1<int>: first half of the character code.
            part2<int>: second half of the character code.

        Exceptions:
            PhidgetException: If this Phidget is not opened and attached,
            or if the index is invalid.
        """
        # The native API stores custom glyphs at character codes 8-15.
        self.__check(self.dll.CPhidgetTextLCD_setCustomCharacter(self.handle, c_int(index + 8), c_int(part1), c_int(part2)))

    def getCustomCharacter(self, index):
        """Returns the custom character location in the ascii character
        storage space of the TextLCD.

        The result can be embedded in a string passed to setDisplayString
        to display the custom character.
        See TextLCD-simple.py for an example of how this works.

        Parameters:
            index<int>: custom character list index (0-7).

        Returns:
            The string code at the specified index <string>.

        Exceptions:
            IndexError: If the supplied index is out of range.
        """
        # Custom characters occupy ascii codes 8-15 ("\010".."\017"), so the
        # original 8-way if/elif ladder over octal literals reduces to chr().
        if 0 <= index <= 7:
            return chr(8 + index)
        message = "Index value %i is out of range" % index
        raise IndexError(message)
"VisIt"
] | 9cfc73f220dd9a8f03893cf2f7d914db850efea3ead1d3b2f379ca22f29bdd02 |
from ase import Atom, Atoms
from gpaw import GPAW
from gpaw.test import equal
# Geometry parameters: a is the Al lattice constant, d the (110)
# interlayer spacing, z the H2 height above the surface, b the H-H
# separation.
a = 4.00
d = a / 2**0.5
z = 1.1
b = 1.5
# Ten-atom Al(110) slab (five layers, two atoms per layer) with an H2
# molecule placed above it; periodic in all three directions.
slab = Atoms([Atom('Al', (0, 0, 0)),
              Atom('Al', (a, 0, 0)),
              Atom('Al', (a/2, d/2, -d/2)),
              Atom('Al', (3*a/2, d/2, -d/2)),
              Atom('Al', (0, 0, -d)),
              Atom('Al', (a, 0, -d)),
              Atom('Al', (a/2, d/2, -3*d/2)),
              Atom('Al', (3*a/2, d/2, -3*d/2)),
              Atom('Al', (0, 0, -2*d)),
              Atom('Al', (a, 0, -2*d)),
              Atom('H', (a/2-b/2, 0, z)),
              Atom('H', (a/2+b/2, 0, z))],
             cell=(2*a, d, 5*d), pbc=(1, 1, 1))
calc = GPAW(h=0.25, nbands=28, kpts=(2, 6, 1),
            convergence={'eigenstates': 1e-5})
slab.set_calculator(calc)
e = slab.get_potential_energy()
niter = calc.get_number_of_iterations()
# With the (2, 6, 1) Monkhorst-Pack grid only 3 irreducible k-points remain.
assert len(calc.get_k_point_weights()) == 3
# Stretch the H-H bond slightly and recompute once.
for i in range(1):
    slab.positions[-2, 0] -= 0.01
    slab.positions[-1, 0] += 0.01
    e = slab.get_potential_energy()
print e, niter
# Regression check against the reference total energy.
energy_tolerance = 0.00015
niter_tolerance = 0
equal(e, -44.69217, energy_tolerance)
| robwarm/gpaw-symm | gpaw/test/big/miscellaneous/H2Al110.py | Python | gpl-3.0 | 1,200 | [
"ASE",
"GPAW"
] | 9bc4b11431354565200e87bf85446f07f712d5f617ebf7fea8fc28dd905e989b |
#!/usr/bin/env python
#
# SchoolTool - common information systems platform for school administration
# Copyright (c) 2003 Shuttleworth Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
SchoolTool test runner.
Syntax: test.py [options] [pathname-regexp [test-regexp]]
There are two kinds of tests:
- unit tests (or programmer tests) test the internal workings of various
components of the system
- functional tests (acceptance tests, customer tests) test only externaly
visible system behaviour
You can choose to run unit tests (this is the default mode), functional tests
(by giving a -f option to test.py) or both (by giving both -u and -f options).
Test cases are located in the directory tree starting at the location of this
script, in subdirectories named 'tests' for unit tests and 'ftests' for
functional tests, in Python modules named 'test*.py'. They are then filtered
according to pathname and test regexes. Alternatively, packages may just have
'tests.py' and 'ftests.py' instead of subpackages 'tests' and 'ftests'
respectively.
A leading "!" in a regexp is stripped and negates the regexp. Pathname
regexp is applied to the whole path (package/package/module.py). Test regexp
is applied to a full test id (package.package.module.class.test_method).
Options:
-h, --help print this help message
-v verbose (print dots for each test run)
-vv very verbose (print test names)
-q quiet (do not print anything on success)
-c colorize output (assumes dark background)
-C colorize output (assumes bright background)
-w enable warnings about omitted test cases
-d invoke pdb when an exception occurs
-1 report only the first failure in doctests
-p show progress bar (can be combined with -v or -vv)
-u select unit tests (default)
-f select functional tests
--level n select only tests at level n or lower
--all-levels select all tests
--list-files list all selected test files
--list-tests list all selected test cases
--list-hooks list all loaded test hooks
--coverage create code coverage reports
--search-in dir limit directory tree walk to dir (optimisation)
--immediate-errors show errors as soon as they happen (default)
--delayed-errors show errors after all unit tests were run
"""
#
# This script borrows ideas from Zope 3's test runner heavily. It is smaller
# and cleaner though, at the expense of more limited functionality.
#
import re
import os
import sys
import time
import types
import getopt
import doctest
import unittest
import traceback
import linecache
import pdb
from sets import Set
__metaclass__ = type
class Options:
    """Configurable properties of the test runner.

    The class attributes double as the documented default values; the
    command-line flags listed in the module docstring override them.
    """

    # test location
    basedir = '.'                # base directory for tests (defaults to
                                 # basedir of argv[0] + 'src'), must be absolute
    search_in = ()               # list of subdirs to traverse (defaults to
                                 # basedir)
    follow_symlinks = True       # should symlinks to subdirectories be
                                 # followed? (hardcoded, may cause loops)

    # which tests to run
    unit_tests = False           # unit tests (default if both are false)
    functional_tests = False     # functional tests

    # test filtering
    level = 1                    # run only tests at this or lower level
                                 # (if None, runs all tests)
    pathname_regex = ''          # regexp for filtering filenames
    test_regex = ''              # regexp for filtering test cases

    # actions to take
    list_files = False           # --list-files
    list_tests = False           # --list-tests
    list_hooks = False           # --list-hooks
    run_tests = True             # run tests (disabled by --list-foo)
    postmortem = False           # invoke pdb when an exception occurs

    # output verbosity
    verbosity = 0                # verbosity level (-v)
    quiet = 0                    # do not print anything on success (-q)
    warn_omitted = False         # produce warnings when a test case is
                                 # not included in a test suite (-w)
    first_doctest_failure = False # report first doctest failure (-1)
    print_import_time = True     # print time taken to import test modules
                                 # (currently hardcoded)
    progress = False             # show running progress (-p)
    colorizer = None             # colorize output (-c)
    coverage = False             # produce coverage reports (--coverage)
    coverdir = 'coverage'        # where to put them (currently hardcoded)
    immediate_errors = True      # show tracebacks twice (--immediate-errors,
                                 # --delayed-errors)
    screen_width = 80            # screen width (autodetected)
def compile_matcher(regex):
    """Return a one-argument predicate built from a regular expression.

    An empty regex matches everything; a leading "!" negates the match
    (so "!" alone matches nothing).
    """
    if not regex:
        return lambda s: True
    negated = regex.startswith('!')
    pattern = regex[1:] if negated else regex
    if negated and not pattern:
        return lambda s: False
    rx = re.compile(pattern)
    if negated:
        return lambda s: rx.search(s) is None
    return lambda s: rx.search(s) is not None
def walk_with_symlinks(top, func, arg):
    """Like os.path.walk, but follows symlinks on POSIX systems.

    If the symlinks create a loop, this function will never finish.
    """
    try:
        entries = os.listdir(top)
    except os.error:
        return
    func(arg, top, entries)
    for entry in entries:
        if entry in ('.', '..'):
            continue
        child = os.path.join(top, entry)
        if os.path.isdir(child):
            walk_with_symlinks(child, func, arg)
def get_test_files(cfg):
    """Return a list of test module filenames.

    Walks every directory in cfg.search_in looking for 'tests'/'ftests'
    packages (or plain tests.py/ftests.py modules) and filters the paths
    with cfg.pathname_regex, applied relative to cfg.basedir.
    """
    matcher = compile_matcher(cfg.pathname_regex)
    allresults = []
    testdir_names = []
    if cfg.functional_tests:
        testdir_names.append('ftests')
    if cfg.unit_tests:
        testdir_names.append('tests')
    # Length of the prefix to strip so the matcher sees relative paths.
    baselen = len(cfg.basedir) + 1
    def visit(ignored, dir, files):
        # Ignore files starting with a dot.
        # Do not descend into subdirs containing a dot.
        # (Deleting from 'files' in place prunes the walker's descent.)
        remove = []
        for idx, file in enumerate(files):
            if file.startswith('.'):
                remove.append(idx)
            elif '.' in file and os.path.isdir(os.path.join(dir, file)):
                remove.append(idx)
        # Delete from the end so earlier indices stay valid.
        remove.reverse()
        for idx in remove:
            del files[idx]
        # Skip non-test directories, but look for tests.py and/or ftests.py
        if os.path.basename(dir) != testdir_name:
            if testdir_name + '.py' in files:
                path = os.path.join(dir, testdir_name + '.py')
                if matcher(path[baselen:]):
                    results.append(path)
            return
        if '__init__.py' not in files:
            print >> sys.stderr, "%s is not a package" % dir
            return
        for file in files:
            if file.startswith('test') and file.endswith('.py'):
                path = os.path.join(dir, file)
                if matcher(path[baselen:]):
                    results.append(path)
    if cfg.follow_symlinks:
        walker = walk_with_symlinks
    else:
        walker = os.path.walk
    # 'results' and 'testdir_name' are rebound on each iteration and read
    # by the 'visit' closure above.
    for testdir_name in testdir_names:
        results = []
        for dir in cfg.search_in:
            walker(dir, visit, None)
        results.sort()
        allresults += results
    return allresults
def import_module(filename, cfg, tracer=None):
    """Import the module stored at *filename* and return the module object.

    The dotted module name is derived from the path relative to
    cfg.basedir; when a tracer is given the import runs under it.
    """
    stem = os.path.splitext(filename)[0]
    if stem.startswith(cfg.basedir):
        stem = stem[len(cfg.basedir):]
    modname = stem.replace(os.path.sep, '.')
    if modname.startswith('.'):
        modname = modname[1:]
    if tracer is None:
        mod = __import__(modname)
    else:
        mod = tracer.runfunc(__import__, modname)
    # __import__ returns the top-level package; descend to the leaf module.
    for part in modname.split('.')[1:]:
        mod = getattr(mod, part)
    return mod
# Classes passed to isinstance to see whether a test is a DocFileCase.
# doctest.DocFileCase exists from Python 2.4 on (there may also be a
# zope.testing.doctest.DocFileCase in some setups).
DocFileCase_classes = (
    (doctest.DocFileCase,) if hasattr(doctest, 'DocFileCase') else ())
def name_of_test(test, basedir=None):
    """Return the name of a test.

    In most cases the name is "package.module.class.method"; for doctest
    files it is instead the pathname "subdir/subdir/filename", made
    relative to *basedir* when possible.
    """
    if not isinstance(test, DocFileCase_classes):
        # test.id() yields "package.module.TestClass.test_method", whereas
        # str(test) would yield "test_method (package.module.TestClass)".
        return test.id()
    # For doctest files str(test) is the pathname while test.id() is a
    # mangled identifier like "README_txt".
    doctest_filename = os.path.abspath(str(test))
    if basedir:
        prefix = basedir + '/'
        if doctest_filename.startswith(prefix):
            doctest_filename = doctest_filename[len(prefix):]
    return doctest_filename
def filter_testsuite(suite, matcher, level=None, basedir=None):
    """Return a flattened list of test cases that match the given matcher."""
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    matched = []
    for item in suite._tests:
        # Skip anything above the requested test level.
        if level is not None and getattr(item, 'level', 0) > level:
            continue
        if not isinstance(item, unittest.TestCase):
            # Nested suite: recurse and splice its matches in.
            matched.extend(filter_testsuite(item, matcher, level, basedir))
        elif matcher(name_of_test(item, basedir)):
            matched.append(item)
    return matched
def get_all_test_cases(module):
    """Return a list of all test case classes defined in a given module.

    Only attributes whose name starts with 'Test' and that are classes
    derived from unittest.TestCase are considered.
    """
    cases = []
    for attr in dir(module):
        if not attr.startswith('Test'):
            continue
        candidate = getattr(module, attr)
        # types.ClassType covers old-style (Python 2) classes.
        is_class = isinstance(candidate, (type, types.ClassType))
        if is_class and issubclass(candidate, unittest.TestCase):
            cases.append(candidate)
    return cases
def get_test_classes_from_testsuite(suite):
    """Return a set of test case classes used in a test suite."""
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    classes = Set()
    for item in suite._tests:
        if isinstance(item, unittest.TestCase):
            classes.add(item.__class__)
        else:
            # Nested suite: merge in its classes recursively.
            classes.update(get_test_classes_from_testsuite(item))
    return classes
def get_test_cases(test_files, cfg, tracer=None):
    """Return a list of test cases from a given list of test modules.

    Each module must provide a test_suite() function; its result is
    filtered by cfg.test_regex and cfg.level.  With cfg.warn_omitted set,
    warns about TestCase classes not included in the suite.
    """
    matcher = compile_matcher(cfg.test_regex)
    results = []
    startTime = time.time()
    for file in test_files:
        module = import_module(file, cfg, tracer=tracer)
        try:
            func = module.test_suite
        except AttributeError:
            print >> sys.stderr
            print >> sys.stderr, ("%s: WARNING: there is no test_suite"
                                  " function" % file)
            print >> sys.stderr
            continue
        if tracer is not None:
            test_suite = tracer.runfunc(func)
        else:
            test_suite = func()
        if test_suite is None:
            continue
        if cfg.warn_omitted:
            # Compare the classes defined in the module with those actually
            # wired into the suite and warn about the difference.
            all_classes = Set(get_all_test_cases(module))
            classes_in_suite = get_test_classes_from_testsuite(test_suite)
            difference = all_classes - classes_in_suite
            for test_class in difference:
                # surround the warning with blank lines, otherwise it tends
                # to get lost in the noise
                print >> sys.stderr
                print >> sys.stderr, ("%s: WARNING: %s not in test suite"
                                      % (file, test_class.__name__))
                print >> sys.stderr
        # A whole suite can be level-restricted, not just individual tests.
        if (cfg.level is not None and
            getattr(test_suite, 'level', 0) > cfg.level):
            continue
        filtered = filter_testsuite(test_suite, matcher, cfg.level, cfg.basedir)
        results.extend(filtered)
    stopTime = time.time()
    timeTaken = float(stopTime - startTime)
    if cfg.print_import_time:
        nmodules = len(test_files)
        plural = (nmodules != 1) and 's' or ''
        print "Imported %d module%s in %.3fs" % (nmodules, plural, timeTaken)
        print
    return results
def get_test_hooks(test_files, cfg, tracer=None):
    """Return a list of test hooks from a given list of test modules.

    Hooks are collected from ``checks.py`` modules located next to the
    test files (for ``ftests`` directories the sibling ``tests`` directory
    is searched too) and from a ``testsupport`` directory that is a
    sibling of cfg.basedir.  Each hook module must provide a module-level
    ``test_hooks()`` function returning a sequence of hooks.
    """
    dirs = Set(map(os.path.dirname, test_files))
    for dir in list(dirs):
        # For functional-test directories, also consider the sibling
        # 'tests' directory.
        if os.path.basename(dir) == 'ftests':
            dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
    dirs = list(dirs)
    dirs.sort()
    hook_modules = []
    for dir in dirs:
        filename = os.path.join(dir, 'checks.py')
        if os.path.exists(filename):
            module = import_module(filename, cfg, tracer=tracer)
            hook_modules.append(module)
    # Also look in a directory 'testsupport' which is a sibling of
    # cfg.basedir
    dir = os.path.join(os.path.dirname(cfg.basedir), 'testsupport')
    filename = os.path.join(dir, 'checks.py')
    if os.path.exists(filename):
        # Temporarily prepend the directory to sys.path so the import of
        # 'checks.py' resolves, and always clean up afterwards.
        sys.path.insert(0, dir)
        try:
            module = import_module('checks.py', cfg, tracer=tracer)
            hook_modules.append(module)
        finally:
            del sys.path[0]
    results = []
    for module in hook_modules:
        if tracer is not None:
            hooks = tracer.runfunc(module.test_hooks)
        else:
            hooks = module.test_hooks()
        results.extend(hooks)
    return results
def extract_tb(tb, limit=None):
    """Improved version of traceback.extract_tb.

    Returns a list of ``(filename, lineno, name, locals)`` tuples, one per
    stack frame -- i.e. it includes a dict with each frame's locals
    instead of the source line text that the stdlib version returns.

    Arguments:
        tb    -- a traceback object, e.g. ``sys.exc_info()[2]`` (may be None)
        limit -- maximum number of frames to extract (None means all)

    Fix: the original shadowed the builtins ``list`` and ``locals``; the
    locals are now bound to non-shadowing names.
    """
    entries = []
    while tb is not None and (limit is None or len(entries) < limit):
        frame = tb.tb_frame
        code = frame.f_code
        # Same tuple layout as traceback.extract_tb, with the frame's
        # locals dict in place of the source line.
        entries.append((code.co_filename, tb.tb_lineno, code.co_name,
                        frame.f_locals))
        tb = tb.tb_next
    return entries
# ANSI foreground color numbers; added to 30 to form the SGR color code
# used by Colorizer.colorize below.
colorcodes = {'gray': 0, 'red': 1, 'green': 2, 'yellow': 3,
              'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}
# The colormaps map logical output element names (the 'texttype' argument
# of Colorizer.colorize) to color names.  A 'dark ' prefix selects the
# non-bold variant of the color.
dark_colormap = { # Color scheme for dark backgrounds
    'fail': 'red',
    'pass': 'green',
    'count': 'white',
    'title': 'white',
    'separator': 'dark white',
    'longtestname': 'yellow',
    'filename': 'dark green',
    'lineno': 'green',
    'testname': 'dark yellow',
    'excname': 'red',
    'excstring': 'yellow',
    'tbheader': 'dark white',
    'doctest_ignored': 'gray',
    'doctest_title': 'dark white',
    'doctest_code': 'yellow',
    'doctest_expected': 'green',
    'doctest_got': 'red',
    'diff_expected': 'red',
    'diff_actual': 'green',
    'diff_context': 'dark white',
    'diff_inline': 'gray'}
light_colormap = { # Color scheme for light backgrounds
    'fail': 'red',
    'pass': 'dark green',
    'count': 'dark green',
    'title': 'red',
    'separator': 'dark white',
    'longtestname': 'dark red',
    'filename': 'dark green',
    'lineno': 'dark magenta',
    'testname': 'dark yellow',
    'excname': 'red',
    'excstring': 'dark yellow',
    'tbheader': 'gray',
    'doctest_ignored': 'dark white',
    'doctest_title': 'gray',
    'doctest_code': 'dark blue',
    'doctest_expected': 'dark green',
    'doctest_got': 'dark red',
    'diff_expected': 'dark red',
    'diff_actual': 'dark green',
    'diff_context': 'dark gray',
    'diff_inline': 'dark magenta'}
class Colorizer(object):
    """Wraps strings in ANSI SGR escape sequences.

    Logical text types (e.g. 'fail', 'filename', 'doctest_got') are
    resolved to color names through the colormap passed to the
    constructor (see dark_colormap/light_colormap above).
    """

    def __init__(self, colormap):
        # colormap: dict mapping texttype -> color name ('dark ' prefix
        # selects the non-bold variant).
        self.colormap = colormap

    def colorize(self, texttype, text):
        """Colorize text by ANSI escape codes in a color provided in colormap."""
        color = self.colormap[texttype]
        if color.startswith('dark '):
            light = 0
            color = color[len('dark '):] # strip the 'dark' prefix
        else:
            light = 1
        # 30..37 are the ANSI foreground color codes; 'light' toggles bold.
        code = 30 + colorcodes[color]
        return '\033[%d;%dm' % (light, code) + text + '\033[0m'

    def colorize_ndiff(self, lines):
        """Colorize ndiff output.

        Returns a new sequence of colored strings.
        `lines` is a sequence of strings.
        Typical input:
            Some context lines
            - This line was removed
            Some context
            + This line was added
            Some context
            - This line esd chnged
            ?      ^^^        -
            + This line was changd
            ?      ^^^       +
            Some context
        """
        result = []
        for line in lines:
            # The ndiff block inside a doctest report is indented, hence
            # the leading whitespace in the prefixes tested below.
            if line.startswith(' -'):
                result.append(self.colorize('diff_expected', line))
            elif line.startswith(' +'):
                result.append(self.colorize('diff_actual', line))
            elif line.startswith(' ?'):
                # '?' lines carry the intra-line change markers (^, -, +).
                result.append(self.colorize('diff_inline', line))
            else:
                result.append(self.colorize('diff_context', line))
        return result

    def colorize_zope_doctest_output(self, lines):
        """Colorize output formatted by the doctest engine included with Zope 3.

        Returns a new sequence of colored strings.
        `lines` is a sequence of strings.
        The typical structure of the doctest output looks either like this:
            File "...", line 123, in foo.bar.baz.doctest_quux
            Failed example:
                f(2, 3)
            Expected:
                6
            Got:
                5
        Or, if an exception has occurred, like this:
            File "...", line 123, in foo.bar.baz.doctest_quux
            Failed example:
                f(2, 3)
            Exception raised:
                Traceback (most recent call last):
                  File "...", line 123, in __init__
                    self.do_something(a, b, c)
                  File "...", line ...
                  ...
                FooError: something bad happened
        If some assumption made by this function is not met, the original sequence
        is returned without any modifications.
        """
        # XXX bug: doctest may report several failures in one test, they are
        #          separated by a horizontal dash line.  Only the first one of
        #          them is now colorized properly.
        header = lines[0]
        if not header.startswith('File "'):
            return lines # not a doctest failure report?
        # Dissect the header in a rather nasty way.
        header = header[len('File "'):]
        fn_end = header.find('"')
        if fn_end == -1:
            # No closing quote after the filename -- bail out unchanged.
            return lines
        filename = header[:fn_end]
        header = header[fn_end+len('", line '):]
        parts = header.split(', in ')
        if len(parts) != 2:
            # No ', in <testname>' part; whatever is left is the line number.
            lineno = header
            filename = self.colorize('filename', filename)
            lineno = self.colorize('lineno', lineno)
            result = ['File "%s", line %s' % (filename, lineno)]
        else:
            lineno, testname = parts
            filename = self.colorize('filename', filename)
            lineno = self.colorize('lineno', lineno)
            testname = self.colorize('testname', testname)
            result = ['File "%s", line %s, in %s' % (filename, lineno, testname)]
        # Colorize the 'Failed example:' section.
        if lines[1] != 'Failed example:':
            return lines
        result.append(self.colorize('doctest_title', lines[1]))
        remaining = lines[2:]
        # The example's source code runs until one of these section headers.
        terminators = ['Expected:', 'Expected nothing', 'Exception raised:',
                       'Differences (ndiff with -expected +actual):']
        while remaining and remaining[0] not in terminators:
            line = remaining.pop(0)
            result.append(self.colorize('doctest_code', line))
        if not remaining:
            return lines
        if remaining[0] in ('Expected:', 'Expected nothing'):
            result.append(self.colorize('doctest_title', remaining.pop(0))) # Expected:
            while remaining and remaining[0] not in ('Got:', 'Got nothing'):
                line = remaining.pop(0)
                result.append(self.colorize('doctest_expected', line))
            if not remaining or remaining[0] not in ('Got:', 'Got nothing'):
                return lines
            result.append(self.colorize('doctest_title', remaining.pop(0))) # Got:
            while remaining:
                line = remaining.pop(0)
                result.append(self.colorize('doctest_got', line))
        elif remaining[0] == 'Exception raised:':
            result.append(self.colorize('doctest_title', remaining.pop(0))) # E. raised:
            while remaining:
                line = remaining.pop(0)
                # TODO: Scrape and colorize the traceback.
                result.append(self.colorize('doctest_got', line))
        elif remaining[0] == 'Differences (ndiff with -expected +actual):':
            result.append(self.colorize('doctest_title', remaining.pop(0)))
            result.extend(self.colorize_ndiff(remaining))
        else:
            return lines
        return result

    def colorize_exception_only(self, lines):
        """Colorize result of traceback.format_exception_only.

        Returns a single newline-joined string.
        """
        if len(lines) > 1:
            return lines # SyntaxError?  We won't deal with that for now.
        lines = lines[0].splitlines()
        # First, colorize the first line, which usually contains the name
        # and the string of the exception.
        result = []
        doctest = 'Failed doctest test for' in lines[0]
        # TODO: We only deal with the output from Zope 3's doctest module.
        #       A colorizer for the Python's doctest module would be nice too.
        if doctest:
            # If we have a doctest, we do not care about this header.  All the
            # interesting things are below, formatted by the doctest runner.
            # NOTE(review): assumes the header is exactly 4 lines long.
            for lineno in range(4):
                result.append(self.colorize('doctest_ignored', lines[lineno]))
            beef = self.colorize_zope_doctest_output(lines[4:])
            result.extend(beef)
            return '\n'.join(result)
        else:
            # A simple exception.  Try to colorize the first row, leave others be.
            excline = lines[0].split(': ', 1)
            if len(excline) == 2:
                excname = self.colorize('excname', excline[0])
                excstring = self.colorize('excstring', excline[1])
                result.append('%s: %s' % (excname, excstring))
            else:
                result.append(self.colorize('excstring', lines[0]))
            result.extend(lines[1:])
            return '\n'.join(result)
def format_exception(etype, value, tb, limit=None, basedir=None,
                     colorizer=None):
    """Improved version of traceback.format_exception.

    Includes Zope-specific extra information in tracebacks
    (__traceback_info__ and __traceback_supplement__ found in frame
    locals).  If colorizer is not None, it is used to colorize the output.
    Returns a list of strings, like the stdlib function.
    """
    color = (colorizer is not None)
    if color:
        colorize = colorizer.colorize
    # Show stack trace.
    # NOTE(review): 'list' and 'locals' shadow builtins here; kept as-is
    # to preserve the original code byte-for-byte.
    list = []
    if tb:
        list = ['Traceback (most recent call last):\n']
        if color:
            list[0] = colorize('tbheader', list[0])
        w = list.append
        for filename, lineno, name, locals in extract_tb(tb, limit):
            line = linecache.getline(filename, lineno)
            if color and 'zope/testing/doctest.py' not in filename:
                # Normal frame: colorize the location parts individually.
                filename = colorize('filename', filename)
                lineno = colorize('lineno', str(lineno))
                name = colorize('testname', name)
                w(' File "%s", line %s, in %s\n' % (filename, lineno, name))
                if line:
                    w(' %s\n' % line.strip())
            elif color:
                # Frames inside the doctest machinery are de-emphasized.
                s = ' File "%s", line %s, in %s\n' % (filename, lineno, name)
                w(colorize('doctest_ignored', s))
                if line:
                    w(' %s\n' % colorize('doctest_ignored', line.strip()))
            else:
                w(' File "%s", line %s, in %s\n' % (filename, lineno, name))
                if line:
                    w(' %s\n' % line.strip())
            # Zope convention: frames may expose extra debug data in locals.
            tb_info = locals.get('__traceback_info__')
            if tb_info is not None:
                w(' Extra information: %s\n' % repr(tb_info))
            tb_supplement = locals.get('__traceback_supplement__')
            if tb_supplement is not None:
                # Stored as (factory, arg1, arg2, ...); instantiate it.
                tb_supplement = tb_supplement[0](*tb_supplement[1:])
                # TODO these should be hookable
                from zope.tales.tales import TALESTracebackSupplement
                from zope.pagetemplate.pagetemplate \
                        import PageTemplateTracebackSupplement
                if isinstance(tb_supplement, PageTemplateTracebackSupplement):
                    template = tb_supplement.manageable_object.pt_source_file()
                    if template:
                        w(' Template "%s"\n' % template)
                elif isinstance(tb_supplement, TALESTracebackSupplement):
                    w(' Template "%s", line %s, column %s\n'
                      % (tb_supplement.source_url, tb_supplement.line,
                         tb_supplement.column))
                    line = linecache.getline(tb_supplement.source_url,
                                             tb_supplement.line)
                    if line:
                        w(' %s\n' % line.strip())
                    w(' Expression: %s\n' % tb_supplement.expression)
                else:
                    # Unknown supplement type: show its repr verbatim.
                    w(' __traceback_supplement__ = %r\n' % (tb_supplement, ))
    # Add the representation of the exception itself.
    lines = traceback.format_exception_only(etype, value)
    if color:
        lines = colorizer.colorize_exception_only(lines)
    list.extend(lines)
    return list
class CustomTestResult(unittest._TextTestResult):
    """Customised TestResult.

    It can show a progress bar, and displays tracebacks for errors and failures
    as soon as they happen, in addition to listing them all at the end.
    """

    # Aliases of the superclass and its methods, bound once at class
    # creation time so the overrides below can delegate to them.
    __super = unittest._TextTestResult
    __super_init = __super.__init__
    __super_startTest = __super.startTest
    __super_stopTest = __super.stopTest
    __super_printErrors = __super.printErrors
    __super_printErrorList = __super.printErrorList

    def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
        # count: total number of tests (for the progress percentage);
        # cfg: the global Options object; hooks: test hook objects whose
        # startTest/stopTest methods are invoked around each test.
        self.__super_init(stream, descriptions, verbosity)
        self.count = count
        self.cfg = cfg
        self.hooks = hooks
        if cfg.progress:
            # Progress display replaces the default dot output.
            self.dots = False
            self._lastWidth = 0
            self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1

    def startTest(self, test):
        n = self.testsRun + 1
        if self.cfg.progress:
            # verbosity == 0: 'xxxx/xxxx (xxx.x%)'
            # verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
            # verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
            self.stream.write("\r%4d" % n)
            if self.count:
                self.stream.write("/%d (%5.1f%%)"
                                  % (self.count, n * 100.0 / self.count))
            if self.showAll: # self.cfg.verbosity == 1
                self.stream.write(": ")
            elif self.cfg.verbosity:
                name = self.getShortDescription(test)
                width = len(name)
                if width < self._lastWidth:
                    # Pad with spaces to overwrite the previous, longer name.
                    name += " " * (self._lastWidth - width)
                self.stream.write(": %s" % name)
                self._lastWidth = width
            self.stream.flush()
        self.__super_startTest(test) # increments testsRun by one and prints
        self.testsRun = n # override the testsRun calculation
        for hook in self.hooks:
            hook.startTest(test)

    def stopTest(self, test):
        # Hooks are torn down in the same order they were started.
        for hook in self.hooks:
            hook.stopTest(test)
        self.__super_stopTest(test)

    def getDescription(self, test):
        return name_of_test(test, self.cfg.basedir)

    def getShortDescription(self, test):
        s = name_of_test(test, self.cfg.basedir)
        if len(s) > self._maxWidth:
            # In most cases s is "package.module.class.method".
            # Try to keep the method name intact, and replace the middle
            # part of "package.module.class" with an ellipsis.
            namelen = len(s.split('.')[-1])
            # NOTE(review): integer division under Python 2; would need //
            # for an index under Python 3.
            left = max(0, (self._maxWidth - namelen) / 2 - 1)
            right = self._maxWidth - left - 3
            s = "%s...%s" % (s[:left], s[-right:])
        return s

    def printErrors(self):
        w = self.stream.writeln
        if self.cfg.progress and not (self.dots or self.showAll):
            # Terminate the progress line before printing the summary.
            w()
        if self.cfg.immediate_errors and (self.errors or self.failures):
            # Tracebacks were already shown inline; print just the header
            # for the summarised list.
            if self.cfg.colorizer is not None:
                w(self.cfg.colorizer.colorize('separator', self.separator1))
                w(self.cfg.colorizer.colorize('title', "Tests that failed"))
                w(self.cfg.colorizer.colorize('separator', self.separator2))
            else:
                w(self.separator1)
                w("Tests that failed")
                w(self.separator2)
        self.__super_printErrors()

    def formatError(self, err):
        # err is an (etype, value, tb) triple from sys.exc_info().
        return "".join(format_exception(basedir=self.cfg.basedir,
                                        colorizer=self.cfg.colorizer, *err))

    def printTraceback(self, kind, test, err):
        """Print a single (possibly colorized) failure/error traceback."""
        w = self.stream.writeln
        if self.cfg.colorizer is not None:
            c = self.cfg.colorizer.colorize
        else:
            c = lambda texttype, text: text
        w()
        w(c('separator', self.separator1))
        kind = c('fail', kind)
        description = c('longtestname', self.getDescription(test))
        w("%s: %s" % (kind, description))
        w(c('separator', self.separator2))
        w(self.formatError(err))
        w()

    def addFailure(self, test, err):
        if self.cfg.immediate_errors:
            self.printTraceback("FAIL", test, err)
        if self.cfg.postmortem:
            # Drop into the debugger right at the point of failure.
            pdb.post_mortem(sys.exc_info()[2])
        # Store the pre-formatted traceback, not the raw err triple.
        self.failures.append((test, self.formatError(err)))

    def addError(self, test, err):
        if self.cfg.immediate_errors:
            self.printTraceback("ERROR", test, err)
        if self.cfg.postmortem:
            pdb.post_mortem(sys.exc_info()[2])
        self.errors.append((test, self.formatError(err)))

    def printErrorList(self, flavour, errors):
        if self.cfg.immediate_errors:
            # Tracebacks already printed as they happened; list names only.
            for test, err in errors:
                description = self.getDescription(test)
                self.stream.writeln("%s: %s" % (flavour, description))
        else:
            self.__super_printErrorList(flavour, errors)
class CustomTestRunner(unittest.TextTestRunner):
    """Customised TestRunner.

    See CustomisedTextResult for a list of extensions.
    """

    __super = unittest.TextTestRunner
    __super_init = __super.__init__
    __super_run = __super.run

    def __init__(self, cfg, hooks=None, stream=sys.stderr, count=None):
        # cfg: the global Options object; hooks: test hook objects passed
        # on to CustomTestResult; count: expected number of tests (taken
        # from the suite in run() if not given).
        self.__super_init(verbosity=cfg.verbosity, stream=stream)
        self.cfg = cfg
        if hooks is not None:
            self.hooks = hooks
        else:
            self.hooks = []
        self.count = count

    def run(self, test):
        """Run the given test case or test suite."""
        if self.count is None:
            self.count = test.countTestCases()
        # c() either colorizes or passes text through unchanged.
        if self.cfg.colorizer is not None:
            c = self.cfg.colorizer.colorize
        else:
            c = lambda texttype, text: text
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = float(stopTime - startTime)
        result.printErrors()
        run = result.testsRun
        if not self.cfg.quiet:
            self.stream.writeln(c('separator', result.separator2))
            run_str = c('count', str(run))
            time_str = c('count', '%.3f' % timeTaken)
            self.stream.writeln("Ran %s test%s in %ss" %
                                (run_str, run != 1 and "s" or "", time_str))
            self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write(c('fail', "FAILED"))
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                self.stream.write(" (failures=%s" % c('count', str(failed)))
            if errored:
                # Open the parenthesis here only if 'failures=' didn't.
                if failed: self.stream.write(", ")
                else: self.stream.write("(")
                self.stream.write("errors=%s" % c('count', str(errored)))
            self.stream.writeln(")")
        elif not self.cfg.quiet:
            self.stream.writeln(c('pass', "OK"))
        return result

    def _makeResult(self):
        return CustomTestResult(self.stream, self.descriptions, self.verbosity,
                                cfg=self.cfg, count=self.count,
                                hooks=self.hooks)
def main(argv):
    """Main program.

    Parses command-line options into an Options instance, discovers test
    files/cases/hooks, and either lists them or runs them (optionally
    under a coverage tracer).  Returns a shell exit status: 0 on success,
    1 on failure or bad usage.
    """
    # Environment
    if sys.version_info < (2, 3):
        print >> sys.stderr, '%s: need Python 2.3 or later' % argv[0]
        print >> sys.stderr, 'your python is %s' % sys.version
        return 1
    # Defaults
    cfg = Options()
    if not cfg.basedir:
        # Default source tree: a 'src' directory next to this script.
        cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
    cfg.basedir = os.path.abspath(cfg.basedir)
    # Figure out terminal size
    try:
        import curses
    except ImportError:
        pass
    else:
        try:
            curses.setupterm()
            cols = curses.tigetnum('cols')
            if cols > 0:
                cfg.screen_width = cols
        except curses.error:
            # Not a capable terminal: keep the default width.
            pass
    # Option processing
    try:
        opts, args = getopt.gnu_getopt(argv[1:], 'hvpcCqufwd1s:',
                                       ['list-files', 'list-tests', 'list-hooks',
                                        'level=', 'all-levels', 'coverage',
                                        'search-in=', 'immediate-errors',
                                        'delayed-errors', 'help'])
    except getopt.error, e:
        print >> sys.stderr, '%s: %s' % (argv[0], e)
        print >> sys.stderr, 'run %s -h for help' % argv[0]
        return 1
    for k, v in opts:
        if k in ['-h', '--help']:
            print __doc__
            return 0
        elif k == '-v':
            cfg.verbosity += 1
            cfg.quiet = False
        elif k == '-p':
            cfg.progress = True
            cfg.quiet = False
        elif k == '-c':
            cfg.colorizer = Colorizer(dark_colormap)
        elif k == '-C':
            cfg.colorizer = Colorizer(light_colormap)
        elif k == '-q':
            cfg.verbosity = 0
            cfg.progress = False
            cfg.quiet = True
        elif k == '-u':
            cfg.unit_tests = True
        elif k == '-f':
            cfg.functional_tests = True
        elif k == '-d':
            # Post-mortem debugging on failures/errors.
            cfg.postmortem = True
        elif k == '-w':
            cfg.warn_omitted = True
        elif k == '-1':
            cfg.first_doctest_failure = True
        elif k == '--list-files':
            cfg.list_files = True
            cfg.run_tests = False
        elif k == '--list-tests':
            cfg.list_tests = True
            cfg.run_tests = False
        elif k == '--list-hooks':
            cfg.list_hooks = True
            cfg.run_tests = False
        elif k == '--coverage':
            cfg.coverage = True
        elif k == '--level':
            try:
                cfg.level = int(v)
            except ValueError:
                print >> sys.stderr, '%s: invalid level: %s' % (argv[0], v)
                print >> sys.stderr, 'run %s -h for help' % argv[0]
                return 1
        elif k == '--all-levels':
            cfg.level = None
        elif k in ('-s', '--search-in'):
            dir = os.path.abspath(v)
            if not dir.startswith(cfg.basedir):
                print >> sys.stderr, ('%s: argument to --search-in (%s) must'
                                      ' be a subdir of %s'
                                      % (argv[0], v, cfg.basedir))
                return 1
            cfg.search_in += (dir, )
        elif k == '--immediate-errors':
            cfg.immediate_errors = True
        elif k == '--delayed-errors':
            cfg.immediate_errors = False
        else:
            print >> sys.stderr, '%s: invalid option: %s' % (argv[0], k)
            print >> sys.stderr, 'run %s -h for help' % argv[0]
            return 1
    # Positional arguments: [pathname_regex [test_regex]]
    if args:
        cfg.pathname_regex = args[0]
    if len(args) > 1:
        cfg.test_regex = args[1]
    if len(args) > 2:
        print >> sys.stderr, '%s: too many arguments: %s' % (argv[0], args[2])
        print >> sys.stderr, 'run %s -h for help' % argv[0]
        return 1
    if not cfg.unit_tests and not cfg.functional_tests:
        # Neither -u nor -f given: default to unit tests.
        cfg.unit_tests = True
    if not cfg.search_in:
        cfg.search_in = (cfg.basedir, )
    # Do not print "Imported %d modules in %.3fs" if --list-* was specified
    # or if quiet mode is enabled.
    if cfg.quiet or cfg.list_tests or cfg.list_hooks or cfg.list_files:
        cfg.print_import_time = False
    # Set up the python path
    sys.path.insert(0, cfg.basedir)
    # Set up tracing before we start importing things
    tracer = None
    if cfg.run_tests and cfg.coverage:
        import trace
        # trace.py in Python 2.3.1 is buggy:
        # 1) Despite sys.prefix being in ignoredirs, a lot of system-wide
        #    modules are included in the coverage reports
        # 2) Some module file names do not have the first two characters,
        #    and in general the prefix used seems to be arbitrary
        # These bugs are fixed in src/trace.py which should be in PYTHONPATH
        # before the official one.
        ignoremods = ['test']
        ignoredirs = [sys.prefix, sys.exec_prefix]
        tracer = trace.Trace(count=True, trace=False,
                             ignoremods=ignoremods, ignoredirs=ignoredirs)
    # Configure doctests
    if cfg.first_doctest_failure:
        import doctest
        # The doctest module in Python 2.3 does not have this feature
        if hasattr(doctest, 'REPORT_ONLY_FIRST_FAILURE'):
            doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
        # Also apply the flag to zope.testing.doctest, if it exists
        try:
            from zope.testing import doctest
            doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
        except ImportError:
            pass
    # Make sure we can identify doctests before we start the filtering
    try:
        import zope.testing.doctest
        global DocFileCase_classes
        DocFileCase_classes += (zope.testing.doctest.DocFileCase,)
    except ImportError:
        pass
    # Finding and importing
    test_files = get_test_files(cfg)
    if cfg.list_tests or cfg.run_tests:
        test_cases = get_test_cases(test_files, cfg, tracer=tracer)
    if cfg.list_hooks or cfg.run_tests:
        test_hooks = get_test_hooks(test_files, cfg, tracer=tracer)
    # Configure the logging module
    import logging
    logging.basicConfig()
    logging.root.setLevel(logging.CRITICAL)
    # Running
    success = True
    if cfg.list_files:
        baselen = len(cfg.basedir) + 1
        print "\n".join([fn[baselen:] for fn in test_files])
    if cfg.list_tests:
        print "\n".join([name_of_test(test, cfg.basedir)
                         for test in test_cases])
    if cfg.list_hooks:
        print "\n".join([str(hook) for hook in test_hooks])
    if cfg.run_tests:
        runner = CustomTestRunner(cfg, test_hooks, count=len(test_cases))
        suite = unittest.TestSuite()
        suite.addTests(test_cases)
        if tracer is not None:
            success = tracer.runfunc(runner.run, suite).wasSuccessful()
            results = tracer.results()
            results.write_results(show_missing=True, coverdir=cfg.coverdir)
        else:
            success = runner.run(suite).wasSuccessful()
    # That's all
    if success:
        return 0
    else:
        return 1
if __name__ == '__main__':
    # Run the test runner and propagate its status to the shell.
    exitcode = main(sys.argv)
    sys.exit(exitcode)
| gorakhargosh/select_backport | runtests.py | Python | mit | 42,181 | [
"VisIt"
] | 573ebdfe2df116b739241422221cfd69b7b642833b2e12f309c966e6b78b3637 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.decorators import lazifyTestClass
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.misc.test_utils import ignore_warning
from hyperspy.misc.utils import slugify
from hyperspy.axes import GeneratorLen
class TestModelJacobians:
    """Tests for Model1D._jacobian, with and without low-loss convolution."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1))
        m = s.create_model()
        self.low_loss = 7.0
        self.weights = 0.3
        m.axis.axis = np.array([1, 0])
        # Only the second channel participates in the fit.
        m.channel_switches = np.array([0, 1], dtype=bool)
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1
        m[0].centre.value = 2.0
        # Twin sigma to centre: their gradients should be summed into a
        # single jacobian column.
        m[0].sigma.twin = m[0].centre
        # Replace the low-loss signal with a mock returning a scalar.
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = self.low_loss
        self.model = m
        m.convolution_axis = np.zeros(2)

    def test_jacobian_not_convolved(self):
        m = self.model
        m.convolved = False
        jac = m._jacobian((1, 2, 3), None, weights=self.weights)
        # One column per free parameter: A, and centre (+ twinned sigma).
        np.testing.assert_array_almost_equal(
            jac.squeeze(),
            self.weights
            * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0)]),
        )
        # _jacobian must leave the fetched parameter values in place.
        assert m[0].A.value == 1
        assert m[0].centre.value == 2
        assert m[0].sigma.value == 2
    def test_jacobian_convolved(self):
        m = self.model
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        # First component bypasses convolution; second is convolved and its
        # gradients are scaled by the (mocked) low-loss value.
        m[0].convolved = False
        m[1].convolved = True
        jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights)
        np.testing.assert_array_almost_equal(
            jac.squeeze(),
            self.weights
            * np.array(
                [
                    m[0].A.grad(0),
                    m[0].sigma.grad(0) + m[0].centre.grad(0),
                    m[1].A.grad(0) * self.low_loss,
                    m[1].centre.grad(0) * self.low_loss,
                    m[1].sigma.grad(0) * self.low_loss,
                ]
            ),
        )
        assert m[0].A.value == 1
        assert m[0].centre.value == 2
        assert m[0].sigma.value == 2
        assert m[1].A.value == 3
        assert m[1].centre.value == 4
        assert m[1].sigma.value == 5
class TestModelCallMethod:
    """Tests for Model1D.__call__ (model evaluation)."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        # Two identical Gaussians, so the full model is twice one component.
        m.append(hs.model.components1D.Gaussian())
        m.append(hs.model.components1D.Gaussian())
        self.model = m

    def test_call_method_no_convolutions(self):
        m = self.model
        m.convolved = False
        m[1].active = False
        r1 = m()
        r2 = m(onlyactive=True)
        # All components vs. only the active one.
        np.testing.assert_allclose(m[0].function(0) * 2, r1)
        np.testing.assert_allclose(m[0].function(0), r2)
        # non_convolved=True must give the same result even when the model
        # is flagged as convolved.
        m.convolved = True
        r1 = m(non_convolved=True)
        r2 = m(non_convolved=True, onlyactive=True)
        np.testing.assert_allclose(m[0].function(0) * 2, r1)
        np.testing.assert_allclose(m[0].function(0), r2)
    def test_call_method_with_convolutions(self):
        m = self.model
        # Mocked low-loss scalar of 0.3 scales convolved components.
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = 0.3
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        m[1].active = False
        m[0].convolved = True
        m[1].convolved = False
        m[2].convolved = False
        m.convolution_axis = np.array([0.0])
        r1 = m()
        r2 = m(onlyactive=True)
        # 2.3 = convolved m[0] (0.3) + non-convolved m[1] and m[2] (1 each);
        # 1.3 excludes the inactive m[1].
        np.testing.assert_allclose(m[0].function(0) * 2.3, r1)
        np.testing.assert_allclose(m[0].function(0) * 1.3, r2)
    def test_call_method_binned(self):
        m = self.model
        m.convolved = False
        m.remove(1)
        # With a binned axis the model is scaled by the axis step.
        m.signal.axes_manager[-1].is_binned = True
        m.signal.axes_manager[-1].scale = 0.3
        r1 = m()
        np.testing.assert_allclose(m[0].function(0) * 0.3, r1)
class TestModelPlotCall:
    """Tests for Model1D._model2plot with mocked model evaluation."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        # Stub out evaluation: the "model" always yields two values.
        m.__call__ = mock.MagicMock()
        m.__call__.return_value = np.array([0.5, 0.25])
        m.axis = mock.MagicMock()
        m.fetch_stored_values = mock.MagicMock()
        # Channels 1 and 2 (of 5) are inside the fitting range.
        m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool)
        self.model = m

    def test_model2plot_own_am(self):
        m = self.model
        m.axis.axis.shape = (5,)
        res = m._model2plot(m.axes_manager)
        # Out-of-range channels are padded with NaN.
        np.testing.assert_array_equal(
            res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan])
        )
        assert m.__call__.called
        assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
        # Using the model's own axes_manager must not touch stored values.
        assert not m.fetch_stored_values.called
    def test_model2plot_other_am(self):
        m = self.model
        res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False)
        np.testing.assert_array_equal(res, np.array([0.5, 0.25]))
        assert m.__call__.called
        assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
        # A foreign axes_manager triggers fetch (switch there and back).
        assert 2 == m.fetch_stored_values.call_count
class TestModelSettingPZero:
    """Tests for the p0 parameter-vector plumbing and bound handling."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1.1
        # centre holds two elements -> contributes two entries to p0.
        m[0].centre._number_of_elements = 2
        m[0].centre.value = (2.2, 3.3)
        m[0].sigma.value = 4.4
        # Fixed parameter: excluded from p0 and from the boundaries.
        m[0].sigma.free = False
        m[0].A._bounds = (0.1, 0.11)
        m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31))
        m[0].sigma._bounds = (0.4, 0.41)
        self.model = m

    def test_setting_p0(self):
        m = self.model
        # Inactive components must not contribute to p0.
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m.p0 = None
        m._set_p0()
        assert m.p0 == (1.1, 2.2, 3.3)
    def test_fetching_from_p0(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m[-1].A.value = 100
        m[-1].sigma.value = 200
        m[-1].centre.value = 300
        # Extra entries beyond the free parameters are ignored; fixed and
        # inactive parameters keep their current values.
        m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8)
        m._fetch_values_from_p0()
        assert m[0].A.value == 1.2
        assert m[0].centre.value == (2.3, 3.4)
        assert m[0].sigma.value == 4.4
        assert m[1].A.value == 100
        assert m[1].sigma.value == 200
        assert m[1].centre.value == 300
    def test_setting_boundaries(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        # Public set_boundaries() is deprecated in favour of a private API.
        with pytest.warns(
            VisibleDeprecationWarning,
            match=r".* has been deprecated and will be made private",
        ):
            m.set_boundaries()
        assert m.free_parameters_boundaries == [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)]
    def test_setting_mpfit_parameters_info(self):
        m = self.model
        # One-sided bounds: 'limited' flags which side is constrained and
        # the unconstrained side defaults to 0 in 'limits'.
        m[0].A.bmax = None
        m[0].centre.bmin = None
        m[0].centre.bmax = 0.31
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        with pytest.warns(
            VisibleDeprecationWarning,
            match=r".* has been deprecated and will be made private",
        ):
            m.set_mpfit_parameters_info()
        assert m.mpfit_parinfo == [
            {"limited": [True, False], "limits": [0.1, 0]},
            {"limited": [False, True], "limits": [0, 0.31]},
            {"limited": [False, True], "limits": [0, 0.31]},
        ]
class TestModel1D:
def setup_method(self, method):
s = hs.signals.Signal1D(np.empty(1))
m = s.create_model()
self.model = m
def test_errfunc(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0
np.testing.assert_equal(m._errfunc(None, 1.0, None), 2.0)
np.testing.assert_equal(m._errfunc(None, 1.0, 0.3), 0.6)
def test_errfunc_sq(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0 * np.ones(2)
np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), None), 8.0)
np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), 0.3), 0.72)
def test_gradient_ls(self):
m = self.model
m._errfunc = mock.MagicMock()
m._errfunc.return_value = 0.1
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_ls(None, None), 2.8)
def test_gradient_ml(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0 * np.ones(2)
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_ml(None, 1.2), 8.4)
def test_gradient_huber(self):
m = self.model
m._errfunc = mock.MagicMock()
m._errfunc.return_value = 0.1
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_huber(None, None), 1.4)
def test_model_function(self):
m = self.model
m.append(hs.model.components1D.Gaussian())
m[0].A.value = 1.3
m[0].centre.value = 0.003
m[0].sigma.value = 0.1
param = (100, 0.1, 0.2)
np.testing.assert_array_almost_equal(176.03266338, m._model_function(param))
assert m[0].A.value == 100
assert m[0].centre.value == 0.1
assert m[0].sigma.value == 0.2
def test_append_existing_component(self):
g = hs.model.components1D.Gaussian()
m = self.model
m.append(g)
with pytest.raises(ValueError, match="Component already in model"):
m.append(g)
def test_append_component(self):
g = hs.model.components1D.Gaussian()
m = self.model
m.append(g)
assert g in m
assert g.model is m
assert g._axes_manager is m.axes_manager
assert all([hasattr(p, "map") for p in g.parameters])
def test_calculating_convolution_axis(self):
m = self.model
# setup
m.axis.offset = 10
m.axis.size = 10
ll_axis = mock.MagicMock()
ll_axis.size = 7
ll_axis.value2index.return_value = 3
m._low_loss = mock.MagicMock()
m.low_loss.axes_manager.signal_axes = [
ll_axis,
]
# calculation
m.set_convolution_axis()
# tests
np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23))
np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0)
def test_access_component_by_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m["test"] is g2
def test_access_component_by_index(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m[1] is g2
def test_component_name_when_append(self):
m = self.model
gs = [
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
]
m.extend(gs)
assert m["Gaussian"] is gs[0]
assert m["Gaussian_0"] is gs[1]
assert m["Gaussian_1"] is gs[2]
def test_several_component_with_same_name(self):
m = self.model
gs = [
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
]
m.extend(gs)
m[0]._name = "hs.model.components1D.Gaussian"
m[1]._name = "hs.model.components1D.Gaussian"
m[2]._name = "hs.model.components1D.Gaussian"
with pytest.raises(ValueError, match=r"Component name .* not found in model"):
m["Gaussian"]
def test_no_component_with_that_name(self):
m = self.model
with pytest.raises(ValueError, match=r"Component name .* not found in model"):
m["Voigt"]
def test_component_already_in_model(self):
    """extend() rejects adding the same component instance twice."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    with pytest.raises(ValueError, match="Component already in model"):
        m.extend((g1, g1))
def test_remove_component(self):
    """remove() accepts a component instance."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    m.remove(g1)
    assert len(m) == 0
def test_remove_component_by_index(self):
    """remove() accepts an integer index."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    m.remove(0)
    assert len(m) == 0
def test_remove_component_by_name(self):
    """remove() accepts a component name."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    m.remove(g1.name)
    assert len(m) == 0
def test_delete_component_by_index(self):
    """``del model[index]`` removes the component."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    del m[0]
    assert g1 not in m
def test_delete_component_by_name(self):
    """``del model[name]`` removes the component."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    del m[g1.name]
    assert g1 not in m
def test_delete_slice(self):
    """Deleting a slice removes those components and breaks their
    parameter twin links so no dangling references remain."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    g2 = hs.model.components1D.Gaussian()
    g3 = hs.model.components1D.Gaussian()
    # twin chains that cross the deleted slice boundary
    g3.A.twin = g1.A
    g1.sigma.twin = g2.sigma
    m.extend([g1, g2, g3])
    del m[:2]
    assert g1 not in m
    assert g2 not in m
    assert g3 in m
    # twin links involving removed components must be severed
    assert not g1.sigma.twin
    assert not g1.A._twins
def test_get_component_by_name(self):
    """_get_component resolves a component from its name."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    g2 = hs.model.components1D.Gaussian()
    g2.name = "test"
    m.extend((g1, g2))
    assert m._get_component("test") is g2
def test_get_component_by_index(self):
    """_get_component resolves a component from its integer index."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    g2 = hs.model.components1D.Gaussian()
    g2.name = "test"
    m.extend((g1, g2))
    assert m._get_component(1) is g2
def test_get_component_by_component(self):
    """_get_component passes through a component instance unchanged."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    g2 = hs.model.components1D.Gaussian()
    g2.name = "test"
    m.extend((g1, g2))
    assert m._get_component(g2) is g2
def test_get_component_wrong(self):
    """_get_component rejects identifiers of unsupported type (float)."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    g2 = hs.model.components1D.Gaussian()
    g2.name = "test"
    m.extend((g1, g2))
    with pytest.raises(ValueError, match="Not a component or component id"):
        m._get_component(1.2)
def test_components_class_default(self):
    """Components are reachable as attributes of ``model.components``."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    assert getattr(m.components, g1.name) is g1
def test_components_class_change_name(self):
    """Renaming a component updates the ``model.components`` attribute."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    g1.name = "test"
    assert getattr(m.components, g1.name) is g1
def test_components_class_change_name_del_default(self):
    """After a rename, the old attribute name is removed from
    ``model.components``."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    g1.name = "test"
    with pytest.raises(AttributeError, match="object has no attribute 'Gaussian'"):
        getattr(m.components, "Gaussian")
def test_components_class_change_invalid_name(self):
    """Names that are not valid identifiers are slugified for attribute
    access on ``model.components``."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    g1.name = "1, Test This!"
    assert getattr(m.components, slugify(g1.name, valid_variable_name=True)) is g1
def test_components_class_change_name_del_default2(self):
    """Renaming away from a slugified invalid name also removes the old
    slugified attribute."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    invalid_name = "1, Test This!"
    g1.name = invalid_name
    g1.name = "test"
    with pytest.raises(AttributeError, match=r"object has no attribute .*"):
        getattr(m.components, slugify(invalid_name))
def test_snap_parameter_bounds(self):
    """ensure_parameters_in_bounds clips free parameters of *active*
    components to [bmin, bmax]; fixed parameters (free=False) and
    inactive components are left untouched."""
    m = self.model
    g1 = hs.model.components1D.Gaussian()
    m.append(g1)
    g2 = hs.model.components1D.Gaussian()
    m.append(g2)
    g3 = hs.model.components1D.Gaussian()
    m.append(g3)
    g4 = hs.model.components1D.Gaussian()
    m.append(g4)
    p = hs.model.components1D.Polynomial(3, legacy=False)
    m.append(p)

    # g1: one-sided bounds, both violated -> snapped to bmin/bmax
    g1.A.value = 3.0
    g1.centre.bmin = 300.0
    g1.centre.value = 1.0
    g1.sigma.bmax = 15.0
    g1.sigma.value = 30
    # g2: two-sided bounds, values below bmin -> snapped up
    g2.A.value = 1
    g2.A.bmin = 0.0
    g2.A.bmax = 3.0
    g2.centre.value = 0
    g2.centre.bmin = 1
    g2.centre.bmax = 3.0
    g2.sigma.value = 4
    g2.sigma.bmin = 1
    g2.sigma.bmax = 3.0
    # g3: parameters not free -> never snapped, except free sigma (bmin==bmax)
    g3.A.bmin = 0
    g3.A.value = -3
    g3.A.free = False
    g3.centre.value = 15
    g3.centre.bmax = 10
    g3.centre.free = False
    g3.sigma.value = 1
    g3.sigma.bmin = 0
    g3.sigma.bmax = 0
    # g4: inactive component -> never snapped
    g4.active = False
    g4.A.value = 300
    g4.A.bmin = 500
    g4.centre.value = 0
    g4.centre.bmax = -1
    g4.sigma.value = 1
    g4.sigma.bmin = 10
    # polynomial: mixed in/out-of-bounds coefficients
    p.a0.value = 1
    p.a1.value = 2
    p.a2.value = 3
    p.a3.value = 4
    p.a0.bmin = 2
    p.a1.bmin = 2
    p.a2.bmin = 2
    p.a3.bmin = 2
    p.a0.bmax = 3
    p.a1.bmax = 3
    p.a2.bmax = 3
    p.a3.bmax = 3
    m.ensure_parameters_in_bounds()
    np.testing.assert_allclose(g1.A.value, 3.0)
    np.testing.assert_allclose(g2.A.value, 1.0)
    np.testing.assert_allclose(g3.A.value, -3.0)
    np.testing.assert_allclose(g4.A.value, 300.0)
    np.testing.assert_allclose(g1.centre.value, 300.0)
    np.testing.assert_allclose(g2.centre.value, 1.0)
    np.testing.assert_allclose(g3.centre.value, 15.0)
    np.testing.assert_allclose(g4.centre.value, 0)
    np.testing.assert_allclose(g1.sigma.value, 15.0)
    np.testing.assert_allclose(g2.sigma.value, 3.0)
    np.testing.assert_allclose(g3.sigma.value, 0.0)
    np.testing.assert_allclose(g4.sigma.value, 1)
    np.testing.assert_almost_equal(p.a0.value, 2)
    np.testing.assert_almost_equal(p.a1.value, 2)
    np.testing.assert_almost_equal(p.a2.value, 3)
    np.testing.assert_almost_equal(p.a3.value, 3)
class TestModelPrintCurrentValues:
    """Smoke tests for Model.print_current_values: it must run without
    error for all flag combinations and with an explicit component list."""

    def setup_method(self, method):
        np.random.seed(1)
        s = hs.signals.Signal1D(np.arange(10, 100, 0.1))
        s.axes_manager[0].scale = 0.1
        s.axes_manager[0].offset = 10
        m = s.create_model()
        # legacy Polynomial emits an API warning; suppress it for the test
        with ignore_warning(message="The API of the `Polynomial` component"):
            m.append(hs.model.components1D.Polynomial(1))
        m.append(hs.model.components1D.Offset())
        self.s = s
        self.m = m

    @pytest.mark.parametrize("only_free", [True, False])
    @pytest.mark.parametrize("skip_multi", [True, False])
    def test_print_current_values(self, only_free, skip_multi):
        """print_current_values runs for every flag combination."""
        self.m.print_current_values(only_free, skip_multi)

    def test_print_current_values_component_list(self):
        """print_current_values accepts an explicit component list."""
        self.m.print_current_values(component_list=list(self.m))
class TestModelUniformBinned:
    """Model evaluation must account for binning (scale factor) on both
    uniform and non-uniform axes."""

    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(10)).create_model()
        self.o = hs.model.components1D.Offset()
        self.m.append(self.o)

    @pytest.mark.parametrize("uniform", [True, False])
    @pytest.mark.parametrize("binned", [True, False])
    def test_binned_uniform(self, binned, uniform):
        """Binned model output is the component value times the bin width.

        NOTE(review): when ``uniform`` is True the axis is converted to a
        *non-uniform* axis — the parameter name looks inverted; confirm
        intended semantics.
        """
        m = self.m
        if binned:
            m.signal.axes_manager[-1].is_binned = True
        m.signal.axes_manager[-1].scale = 0.3
        if uniform:
            m.signal.axes_manager[-1].convert_to_non_uniform_axis()
        np.testing.assert_allclose(m[0].function(0) * 0.3, m())
        self.m.print_current_values()
class TestStoreCurrentValues:
    """store_current_values must write parameter values into the map arrays
    only for active components."""

    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(10)).create_model()
        self.o = hs.model.components1D.Offset()
        self.m.append(self.o)

    def test_active(self):
        """Active component: value and is_set flag are stored in the map."""
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        assert self.o.offset.map["values"][0] == 2
        assert self.o.offset.map["is_set"][0]

    def test_not_active(self):
        """Inactive component: the map is left untouched."""
        self.o.active = False
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        assert self.o.offset.map["values"][0] != 2
class TestSetCurrentValuesTo:
    """assign_current_values_to_all broadcasts the current parameter values
    across every navigation index, optionally for selected components."""

    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(10).reshape(2, 5)).create_model()
        self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
        self.m.extend(self.comps)

    def test_set_all(self):
        """With no argument, every component's map is filled."""
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()
        assert (self.comps[0].offset.map["values"] == 2).all()
        assert (self.comps[1].offset.map["values"] == 2).all()

    def test_set_1(self):
        """With a component list, only the listed components are updated."""
        self.comps[1].offset.value = 2
        self.m.assign_current_values_to_all([self.comps[1]])
        assert (self.comps[0].offset.map["values"] != 2).all()
        assert (self.comps[1].offset.map["values"] == 2).all()
def test_fetch_values_from_arrays():
    """fetch_values_from_array assigns values and stds to the free
    parameters, matched in alphabetical parameter-name order."""
    m = hs.signals.Signal1D(np.arange(10)).create_model()
    gaus = hs.model.components1D.Gaussian(A=100, sigma=10, centre=3)
    m.append(gaus)
    values = np.array([1.2, 3.4, 5.6])
    stds = values - 1
    m.fetch_values_from_array(values, array_std=stds)
    ordered = sorted(gaus.free_parameters, key=lambda p: p.name)
    for expected_value, expected_std, param in zip(values, stds, ordered):
        assert param.value == expected_value
        assert param.std == expected_std
class TestAsSignal:
    """Tests for Model.as_signal: rendering model components to a signal."""

    def setup_method(self, method):
        # 2x2 navigation, 5-point signal; two Offset components at value 2,
        # so the full model evaluates to 4 everywhere.
        self.m = hs.signals.Signal1D(np.arange(20).reshape(2, 2, 5)).create_model()
        self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
        self.m.extend(self.comps)
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()

    def test_all_components_simple(self):
        """All active components sum to 4 at every point."""
        s = self.m.as_signal()
        assert np.all(s.data == 4.0)

    def test_one_component_simple(self):
        """Restricting to one component halves the output; the other
        component's active flag is unchanged."""
        s = self.m.as_signal(component_list=[0])
        assert np.all(s.data == 2.0)
        assert self.m[1].active

    def test_all_components_multidim(self):
        """active_is_multidimensional masks a component per nav index."""
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal()
        assert np.all(s.data == 4.0)
        # deactivate component 0 at the first navigation row only
        self.m[0]._active_array[0] = False
        s = self.m.as_signal()
        np.testing.assert_array_equal(
            s.data, np.array([np.ones((2, 5)) * 2, np.ones((2, 5)) * 4])
        )
        assert self.m[0].active_is_multidimensional

    def test_one_component_multidim(self):
        """component_list composes with per-index active arrays."""
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal(component_list=[0])
        assert np.all(s.data == 2.0)
        assert self.m[1].active
        assert not self.m[1].active_is_multidimensional
        s = self.m.as_signal(component_list=[1])
        np.testing.assert_equal(s.data, 2.0)
        assert self.m[0].active_is_multidimensional
        self.m[0]._active_array[0] = False
        s = self.m.as_signal(component_list=[1])
        assert np.all(s.data == 2.0)
        # component 0 contributes nothing where its active array is False
        s = self.m.as_signal(component_list=[0])
        np.testing.assert_array_equal(
            s.data, np.array([np.zeros((2, 5)), np.ones((2, 5)) * 2])
        )

    @pytest.mark.parametrize("kw", [{"parallel": True}, {"max_workers": 1}])
    def test_warnings(self, kw):
        """Deprecated as_signal keyword arguments must warn."""
        with pytest.warns(
            VisibleDeprecationWarning, match=r".* has been deprecated",
        ):
            _ = self.m.as_signal(**kw)

    def test_out_of_range_to_nan(self):
        """Switched-off channels become NaN when out_of_range_to_nan=True,
        and are rendered normally otherwise."""
        index = 2
        self.m.channel_switches[:index] = False
        s1 = self.m.as_signal(component_list=[0], out_of_range_to_nan=True)
        s2 = self.m.as_signal(component_list=[0], out_of_range_to_nan=False)
        np.testing.assert_allclose(
            self.m.channel_switches, [False, False, True, True, True]
        )
        np.testing.assert_allclose(s2.data, np.ones_like(s2) * 2)
        np.testing.assert_allclose(s1.isig[index:], s2.isig[index:])
        np.testing.assert_allclose(
            s1.isig[:index], np.ones_like(s1.isig[:index].data) * np.nan
        )
        np.testing.assert_allclose(
            s1.isig[index:], np.ones_like(s1.isig[index:].data) * 2
        )

    def test_out_argument(self):
        """as_signal writes into a supplied ``out`` signal in place."""
        out = self.m.as_signal()
        out.data.fill(0)
        s = self.m.as_signal(out=out)
        assert np.all(s.data == 4.0)
@lazifyTestClass
class TestCreateModel:
    """create_model must dispatch on the signal dimensionality."""

    def setup_method(self, method):
        self.s = hs.signals.Signal1D(np.asarray([0]))
        self.im = hs.signals.Signal2D(np.ones([1, 1]))

    def test_create_model(self):
        """Signal1D -> Model1D, Signal2D -> Model2D."""
        from hyperspy.models.model1d import Model1D
        from hyperspy.models.model2d import Model2D

        assert isinstance(self.s.create_model(), Model1D)
        assert isinstance(self.im.create_model(), Model2D)
class TestAdjustPosition:
    """Lifecycle tests for the interactive position-adjustment widgets."""

    def setup_method(self, method):
        self.s = hs.signals.Signal1D(np.random.rand(10, 10, 20))
        self.m = self.s.create_model()

    def test_enable_adjust_position(self):
        """Enabling creates one widget entry per component."""
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        assert len(self.m._position_widgets) == 1
        # Check that both line and label was added
        assert len(list(self.m._position_widgets.values())[0]) == 2

    def test_disable_adjust_position(self):
        """Disabling removes all position widgets."""
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        self.m.disable_adjust_position()
        assert len(self.m._position_widgets) == 0

    def test_enable_all(self):
        """Components appended after enabling also get widgets."""
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        self.m.append(hs.model.components1D.Gaussian())
        assert len(self.m._position_widgets) == 2

    def test_enable_all_zero_start(self):
        """Enabling on an empty model still tracks later appends."""
        self.m.enable_adjust_position()
        self.m.append(hs.model.components1D.Gaussian())
        assert len(self.m._position_widgets) == 1

    def test_manual_close(self):
        """Closing widgets one by one removes the component entry once
        both of its widgets (line and label) are closed."""
        self.m.append(hs.model.components1D.Gaussian())
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        list(self.m._position_widgets.values())[0][0].close()
        assert len(self.m._position_widgets) == 2
        assert len(list(self.m._position_widgets.values())[0]) == 1
        list(self.m._position_widgets.values())[0][0].close()
        assert len(self.m._position_widgets) == 1
        assert len(list(self.m._position_widgets.values())[0]) == 2
        self.m.disable_adjust_position()
        assert len(self.m._position_widgets) == 0
def test_deprecated_private_functions():
    """Deprecated private model helpers must emit VisibleDeprecationWarning."""
    m = hs.signals.Signal1D(np.zeros(1)).create_model()
    for deprecated_call in (m.set_boundaries, m.set_mpfit_parameters_info):
        with pytest.warns(VisibleDeprecationWarning, match=r".* has been deprecated"):
            deprecated_call()
def generate():
    """Yield the diagonal index pairs (0, 0), (1, 1), (2, 2)."""
    yield from ((index, index) for index in range(3))
class Test_multifit_iterpath():
    """multifit must accept custom iteration paths: index arrays,
    generators, and generators with a declared length."""

    def setup_method(self, method):
        data = np.ones((3, 3, 10))
        s = hs.signals.Signal1D(data)
        ax = s.axes_manager
        m = s.create_model()
        G = hs.model.components1D.Gaussian()
        m.append(G)
        self.m = m
        self.ax = ax

    def test_custom_iterpath(self):
        """Only the indices listed in the iterpath are fitted."""
        indices = np.array([(0, 0), (1, 1), (2, 2)])
        self.ax.iterpath = indices
        self.m.multifit(iterpath=indices)
        set_indices = np.array(np.where(self.m[0].A.map['is_set'])).T
        # map arrays are indexed (row, col) i.e. reversed w.r.t. nav order
        np.testing.assert_array_equal(set_indices, indices[:, ::-1])

    def test_model_generator(self):
        """A plain generator is accepted as iterpath."""
        gen = generate()
        self.m.axes_manager.iterpath = gen
        self.m.multifit()

    def test_model_GeneratorLen(self):
        """A GeneratorLen wrapper is accepted as iterpath.

        NOTE(review): unlike test_model_generator this never calls
        ``self.m.multifit()`` — possibly a missing call; confirm.
        """
        gen = GeneratorLen(generate(), 3)
        self.m.axes_manager.iterpath = gen
class TestSignalRange:
    """_parse_signal_range_values converts axis values (or a SpanROI) to
    index ranges, honouring the axis offset and sign of the scale."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.random.rand(10, 10, 20))
        s.axes_manager[-1].offset = 100
        m = s.create_model()
        self.s = s
        self.m = m

    def test_parse_value(self):
        """Values map to indices via the offset; reversed ranges raise."""
        m = self.m
        assert m._parse_signal_range_values(105, 110) == (5, 10)
        with pytest.raises(ValueError):
            m._parse_signal_range_values(89, 85)

    def test_parse_value_negative_scale(self):
        """With a negative scale the value order is inverted, and values
        beyond the axis end are clipped to the last index."""
        m = self.m
        s = self.s
        s.axes_manager[-1].scale = -1
        assert m._parse_signal_range_values(89, 85) == (11, 15)
        with pytest.raises(ValueError):
            m._parse_signal_range_values(85, 89)
        assert m._parse_signal_range_values(89, 20) == (11, 19)

    def test_parse_roi(self):
        """A SpanROI is unpacked into its (left, right) values."""
        m = self.m
        roi = hs.roi.SpanROI(105, 110)
        assert m._parse_signal_range_values(roi) == (5, 10)
| thomasaarholt/hyperspy | hyperspy/tests/model/test_model.py | Python | gpl-3.0 | 30,297 | [
"Gaussian"
] | 6a4e44a3c960f59a988aac46f5e9b2c043d0f15e335438ef8697ac79d2529b79 |
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
    """Validate a student's create_lookup_tables implementation.

    ``create_lookup_tables(text)`` must return a (vocab_to_int,
    int_to_vocab) pair of dicts that are exact inverses of each other and
    cover the vocabulary of the sample text.
    """
    with tf.Graph().as_default():
        test_text = '''
        Moe_Szyslak Moe's Tavern Where the elite meet to drink
        Bart_Simpson Eh yeah hello is Mike there Last name Rotch
        Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
        Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
        Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
        Homer_Simpson I got my problems Moe Give me another one
        Moe_Szyslak Homer hey you should not drink to forget your problems
        Barney_Gumble Yeah you should only drink to enhance your social skills'''

        test_text = test_text.lower()
        test_text = test_text.split()

        vocab_to_int, int_to_vocab = create_lookup_tables(test_text)

        # Check types
        assert isinstance(vocab_to_int, dict),\
            'vocab_to_int is not a dictionary.'
        assert isinstance(int_to_vocab, dict),\
            'int_to_vocab is not a dictionary.'

        # Compare lengths of dicts
        assert len(vocab_to_int) == len(int_to_vocab),\
            'Length of vocab_to_int and int_to_vocab don\'t match. ' \
            'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))

        # Make sure the dicts have the same words
        vocab_to_int_word_set = set(vocab_to_int.keys())
        int_to_vocab_word_set = set(int_to_vocab.values())

        assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
            'vocab_to_int and int_to_vocab don\'t have the same words.' \
            '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
        assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
            'vocab_to_int and int_to_vocab don\'t have the same words.' \
            '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)

        # Make sure the dicts have the same word ids
        vocab_to_int_word_id_set = set(vocab_to_int.values())
        int_to_vocab_word_id_set = set(int_to_vocab.keys())

        assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
            'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
            '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
        assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
            'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
            '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)

        # Make sure the dicts make the same lookup (round-trip word -> id -> word)
        missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]

        assert not missmatches,\
            'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
                len(missmatches),
                *missmatches[0])

        # Loose sanity check on vocabulary size
        assert len(vocab_to_int) > len(set(test_text))/2,\
            'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))

    _print_success_message()
def test_get_batches(get_batches):
    """Validate a student's get_batches implementation.

    Expects an array of shape (n_batches, 2, batch_size, seq_length) where
    axis 1 holds (inputs, targets); targets are inputs shifted by one, and
    the very last target wraps around to the first input.
    """
    with tf.Graph().as_default():
        test_batch_size = 128
        test_seq_length = 5
        test_int_text = list(range(1000*test_seq_length))
        batches = get_batches(test_int_text, test_batch_size, test_seq_length)

        # Check type
        assert isinstance(batches, np.ndarray),\
            'Batches is not a Numpy array'

        # Check shape
        assert batches.shape == (7, 2, 128, 5),\
            'Batches returned wrong shape. Found {}'.format(batches.shape)

        # Each sequence in the first batch starts 35 (= 7 batches * 5 steps)
        # elements after the previous one; targets lead inputs by one.
        for x in range(batches.shape[2]):
            assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
                'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
            assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
                'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])

        # The final target element wraps to the first input element.
        last_seq_target = (test_batch_size-1) * 35 + 31
        last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
        last_seq[-1] = batches[0,0,0,0]

        assert np.array_equal(batches[-1,1,-1], last_seq),\
            'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)

    _print_success_message()
def test_tokenize(token_lookup):
    """Validate a student's token_lookup implementation.

    ``token_lookup()`` must return a dict mapping exactly the expected
    punctuation symbols to space-free string tokens that do not themselves
    contain any of the symbols (which would break later replacement).
    """
    with tf.Graph().as_default():
        symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
        token_dict = token_lookup()

        # Check type
        assert isinstance(token_dict, dict), \
            'Returned type is {}.'.format(type(token_dict))

        # Check symbols: the key set must match `symbols` exactly
        missing_symbols = symbols - set(token_dict.keys())
        unknown_symbols = set(token_dict.keys()) - symbols

        assert not missing_symbols, \
            'Missing symbols: {}'.format(missing_symbols)
        assert not unknown_symbols, \
            'Unknown symbols: {}'.format(unknown_symbols)

        # Check values type
        bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]

        assert not bad_value_type,\
            'Found token as {} type.'.format(bad_value_type[0])

        # Check for spaces
        key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
        val_has_spaces = [val for val in token_dict.values() if ' ' in val]

        assert not key_has_spaces,\
            'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
        assert not val_has_spaces,\
            'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])

        # Check for symbols in values (keeps the LAST offending pair found)
        symbol_val = ()
        for symbol in symbols:
            for val in token_dict.values():
                if symbol in val:
                    symbol_val = (symbol, val)

        assert not symbol_val,\
            'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)

    _print_success_message()
def test_get_inputs(get_inputs):
    """Validate a student's get_inputs implementation.

    ``get_inputs()`` must return (input_data, targets, lr): three TF
    placeholders where input/targets are rank 2, the learning rate is a
    scalar, and the input tensor is named 'input'.
    """
    with tf.Graph().as_default():
        input_data, targets, lr = get_inputs()

        # Check type
        assert input_data.op.type == 'Placeholder',\
            'Input not a Placeholder.'
        assert targets.op.type == 'Placeholder',\
            'Targets not a Placeholder.'
        assert lr.op.type == 'Placeholder',\
            'Learning Rate not a Placeholder.'

        # Check name
        assert input_data.name == 'input:0',\
            'Input has bad name. Found name {}'.format(input_data.name)

        # Check rank. Fixed: compare against None with `is`, not `==` —
        # TensorShape overloads __eq__, so identity comparison is the
        # correct (and PEP 8 mandated) form here.
        input_rank = 0 if input_data.get_shape() is None else len(input_data.get_shape())
        targets_rank = 0 if targets.get_shape() is None else len(targets.get_shape())
        lr_rank = 0 if lr.get_shape() is None else len(lr.get_shape())

        assert input_rank == 2,\
            'Input has wrong rank. Rank {} found.'.format(input_rank)
        assert targets_rank == 2,\
            'Targets has wrong rank. Rank {} found.'.format(targets_rank)
        assert lr_rank == 0,\
            'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)

    _print_success_message()
def test_get_init_cell(get_init_cell):
    """Validate a student's get_init_cell implementation.

    ``get_init_cell(batch_size, rnn_size)`` must return a MultiRNNCell and
    an initial state tensor explicitly named 'initial_state'.
    """
    with tf.Graph().as_default():
        test_batch_size_ph = tf.placeholder(tf.int32, [])
        test_rnn_size = 256

        cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)

        # Check type
        assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
            'Cell is wrong type.  Found {} type'.format(type(cell))

        # Check for name attribute
        assert hasattr(init_state, 'name'),\
            'Initial state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'

        # Check name
        assert init_state.name == 'initial_state:0',\
            'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)

    _print_success_message()
def test_get_embed(get_embed):
    """Validate a student's get_embed implementation.

    ``get_embed(input_data, vocab_size, embed_dim)`` must return a tensor
    of shape (batch, seq_length, embed_dim).
    """
    with tf.Graph().as_default():
        embed_shape = [50, 5, 256]
        test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
        test_vocab_size = 27
        test_embed_dim = embed_shape[2]

        embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)

        # Check shape
        assert embed.shape == embed_shape,\
            'Wrong shape.  Found shape {}'.format(embed.shape)

    _print_success_message()
def test_build_rnn(build_rnn):
    """Validate a student's build_rnn implementation.

    ``build_rnn(cell, inputs)`` must return (outputs, final_state) with
    the final state explicitly named 'final_state'.
    """
    with tf.Graph().as_default():
        test_rnn_size = 256
        test_rnn_layer_size = 2
        test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])

        test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
        outputs, final_state = build_rnn(test_cell, test_inputs)

        # Check name
        assert hasattr(final_state, 'name'),\
            'Final state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'
        assert final_state.name == 'final_state:0',\
            'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)

        # Check shape: (batch, time, rnn_size) outputs and a
        # (layers, 2=[c, h], batch, rnn_size) LSTM state stack
        assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
            'Outputs has wrong shape.  Found shape {}'.format(outputs.get_shape())
        assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
            'Final state wrong shape.  Found shape {}'.format(final_state.get_shape())

    _print_success_message()
def test_build_nn(build_nn):
    """Validate a student's build_nn implementation.

    ``build_nn(cell, rnn_size, input_data, vocab_size, embed_dim)`` must
    return per-token logits over the vocabulary and a named final state.
    """
    with tf.Graph().as_default():
        test_input_data_shape = [128, 5]
        test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
        test_rnn_size = 256
        test_embed_dim = 300
        test_rnn_layer_size = 2
        test_vocab_size = 27
        test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])

        logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)

        # Check name
        assert hasattr(final_state, 'name'), \
            'Final state doesn\'t have the "name" attribute.  Are you using build_rnn?'
        assert final_state.name == 'final_state:0', \
            'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)

        # Check Shape: logits are (batch, seq, vocab)
        assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
            'Outputs has wrong shape.  Found shape {}'.format(logits.get_shape())
        assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
            'Final state wrong shape.  Found shape {}'.format(final_state.get_shape())

    _print_success_message()
def test_get_tensors(get_tensors):
    """Validate a student's get_tensors implementation.

    ``get_tensors(graph)`` must fetch the 'input', 'initial_state',
    'final_state' and 'probs' tensors from a loaded graph by name.
    """
    test_graph = tf.Graph()
    with test_graph.as_default():
        test_input = tf.placeholder(tf.int32, name='input')
        test_initial_state = tf.placeholder(tf.int32, name='initial_state')
        test_final_state = tf.placeholder(tf.int32, name='final_state')
        test_probs = tf.placeholder(tf.float32, name='probs')

    input_text, initial_state, final_state, probs = get_tensors(test_graph)

    # Check correct tensor
    assert input_text == test_input,\
        'Test input is wrong tensor'
    assert initial_state == test_initial_state, \
        'Initial state is wrong tensor'
    assert final_state == test_final_state, \
        'Final state is wrong tensor'
    assert probs == test_probs, \
        'Probabilities is wrong tensor'

    _print_success_message()
def test_pick_word(pick_word):
    """Validate a student's pick_word implementation.

    ``pick_word(probabilities, int_to_vocab)`` must return one word (str)
    drawn from the vocabulary.
    """
    with tf.Graph().as_default():
        test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
        test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}

        pred_word = pick_word(test_probabilities, test_int_to_vocab)

        # Check type
        assert isinstance(pred_word, str),\
            'Predicted word is wrong type. Found {} type.'.format(type(pred_word))

        # Check word is from vocab
        assert pred_word in test_int_to_vocab.values(),\
            'Predicted word not found in int_to_vocab.'

    _print_success_message()
| d-k-b/udacity-deep-learning | tv-script-generation/problem_unittests.py | Python | mit | 13,365 | [
"MOE"
] | b0987ede237f7413276b10a00969c15bdcd582983cf439b6557bbdaed3c995ce |
"""
Functions to extract trajectory from a perses relative calculation
"""
import numpy as np
import mdtraj as md
from perses.analysis.utils import open_netcdf
def get_hybrid_topology(file):
    """Extract the hybrid topologies stored by a perses relative calculation.

    Parameters
    ----------
    file : str
        Name of, or path to, the ``.npy`` file written by the relative
        calculation (a pickled dict of {phase_name: hybrid_factory}).

    Returns
    -------
    phases : list
        Phase names found in the .npy file, in iteration order.
    topologies : list
        The ``hybrid_topology`` of each phase, aligned with ``phases``.
    """
    # The .npy file stores a pickled dict wrapped in a 0-d object array, so
    # allow_pickle=True is required (numpy >= 1.16.3 disables it by default
    # and would raise ValueError on the original bare np.load call).
    hybrid_factory = np.load(file, allow_pickle=True)
    hybrid_factory = hybrid_factory.flatten()[0]
    phases = []
    topologies = []
    for phase in hybrid_factory.keys():
        # Bug fix: phase names were never collected, so the returned
        # `phases` list was always empty despite the documented contract.
        phases.append(phase)
        topologies.append(hybrid_factory[phase].hybrid_topology)
    return phases, topologies
def get_positions(file):
    """Extract the first replica's positions for every stored iteration.

    Parameters
    ----------
    file : str
        Name of, or path to, the NetCDF file produced by the calculation.

    Returns
    -------
    list
        One entry per iteration: the positions of replica 0 as nested
        lists (converted from the netCDF array via ``tolist``).
    """
    ncfile = open_netcdf(file)
    all_positions = ncfile.variables['positions']
    results = []
    # Cleaned up: the original kept an unused `coords` list and an unused
    # enumerate index; keep only replica 0 of each iteration.
    for pos in all_positions:
        results.append(pos.tolist()[0])
    return results
def write_trajectory(positions, topology, outputfile='trajectory.pdb', center=True, offline=None):
    """Build an mdtraj Trajectory from positions and save it as a PDB file.

    Parameters
    ----------
    positions : list
        Per-frame coordinates, e.g. the output of ``get_positions``.
    topology : mdtraj.Topology
        Topology describing the system.
    outputfile : str, optional (default 'trajectory.pdb')
        Path of the PDB file to write.
    center : bool, optional (default True)
        If truthy, center the coordinates before saving.
    offline : int, optional (default None)
        If given, keep only every ``offline``-th frame.
    """
    if offline is not None:  # fixed: identity comparison with None (PEP 8), not `!= None`
        traj = md.Trajectory(positions[0::offline], topology)
    else:
        traj = md.Trajectory(positions, topology)
    if center:  # fixed: no comparison against the literal True
        traj.center_coordinates()
    traj.save_pdb(outputfile)
    return
if __name__ == '__main__':
    # CLI usage: python extract_trajectory.py <trajectory.nc> <hybrid_factory.npy>
    import sys
    ncfilename = sys.argv[1]
    factoryfilename = sys.argv[2]
    positions = get_positions(ncfilename)
    # only the first phase's topology is used for the output PDB
    _, topology = get_hybrid_topology(factoryfilename)
    write_trajectory(positions, topology[0])
| choderalab/perses | perses/analysis/extract_trajectory.py | Python | mit | 1,648 | [
"MDTraj"
] | ad52eaaf77ea27e17f4f0484d4f58237f8aff78b1aca964993a6b5bf69db17ba |
try:
visits = int(open(counter_file).read())
except IOError:
# first visit : the file does not exist
visits = 0
if not hasattr(Session(),"user"):
visits += 1
out = open(counter_file,'w')
out.write(str(visits))
out.close()
Session().user = None # create attribute user
print "%s visits" %visits
| jhjguxin/PyCDC | Karrigell-2.3.5/webapps/demo/counter.py | Python | gpl-3.0 | 340 | [
"VisIt"
] | 988395175023477d05d7e2f34a6a40affa0959abf1ed7651a2a2fb8bb72827c8 |
from ScriptingLanguage.Interpreter import Interpreter, UndefinedVariable
__author__ = 'chronium'
def visit_expression(expression):
    """Evaluate a (possibly binary) arithmetic expression node.

    ``expression.value`` is a triple ``(left, operator, right)`` where
    left/right are nodes with a ``visit()`` method and operator is one of
    '+', '-', '*', '/', '%' (or falsy for a bare operand).

    Returns the left operand's value when there is no operator, 0 for an
    unknown operator, and 0 on division/modulo by zero.
    """
    left = expression.value[0].visit()
    operator = expression.value[1]
    if not operator:
        return left
    right = expression.value[2].visit()
    # Bug fix: the original built a dict literal whose values computed ALL
    # five operations eagerly, so any right == 0 raised ZeroDivisionError
    # from the '%' entry and returned 0 even for '+', '-' and '*'.
    # Dispatch lazily so only the selected operation is evaluated.
    try:
        if operator == '+':
            return left + right
        if operator == '-':
            return left - right
        if operator == '*':
            return left * right
        if operator == '/':
            return left / right
        if operator == '%':
            return left % right
        return 0  # unknown operator, matching the original .get default
    except ZeroDivisionError:
        return 0
def visit_var(identifier):
    """Look up a variable's current value in the interpreter.

    Reports undefined variables on stdout and evaluates them as 0.
    """
    name = identifier.value
    try:
        return Interpreter().get_variable(name)
    except UndefinedVariable:
        print('[{}] is undefined'.format(name))
        return 0
def visit_number(number):
    """Return the literal numeric value carried by a number node."""
    return number.value
| chronium/ChronoScript | ScriptingLanguage/Visitors/ExpressionVisitor.py | Python | gpl-2.0 | 820 | [
"VisIt"
] | 83a77cb6d6d4d023f67c152fe836cbe505f0215eb343b93a579f72a2645bc80f |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA.
#
# Authors:
# Alberto Martín <alberto.martin@bitergia.com>
# Santiago Dueñas <sduenas@bitergia.com>
#
import json
import logging
import time
from grimoirelab_toolkit.datetime import datetime_to_utc
from grimoirelab_toolkit.uris import urijoin
from ...backend import (Backend,
BackendCommand,
BackendCommandArgumentParser)
from ...client import HttpClient
from ...utils import DEFAULT_DATETIME
# Item category produced by this backend
CATEGORY_QUESTION = "question"

MAX_QUESTIONS = 100  # Maximum number of questions fetched per API query

logger = logging.getLogger(__name__)
class StackExchange(Backend):
"""StackExchange backend for Perceval.
This class retrieves the questions stored in any of the
StackExchange sites. To initialize this class the
site must be provided.
:param site: StackExchange site
:param tagged: filter items by question Tag
:param api_token: StackExchange access_token for the API
:param max_questions: max of questions per page retrieved
:param tag: label used to mark the data
:param archive: archive to store/retrieve items
"""
version = '0.10.4'
CATEGORIES = [CATEGORY_QUESTION]
def __init__(self, site, tagged=None, api_token=None,
max_questions=MAX_QUESTIONS, tag=None, archive=None):
origin = site
super().__init__(origin, tag=tag, archive=archive)
self.site = site
self.api_token = api_token
self.tagged = tagged
self.max_questions = max_questions
self.client = None
def fetch(self, category=CATEGORY_QUESTION, from_date=DEFAULT_DATETIME):
"""Fetch the questions from the site.
The method retrieves, from a StackExchange site, the
questions updated since the given date.
:param from_date: obtain questions updated since this date
:returns: a generator of questions
"""
if not from_date:
from_date = DEFAULT_DATETIME
from_date = datetime_to_utc(from_date)
kwargs = {'from_date': from_date}
items = super().fetch(category, **kwargs)
return items
def fetch_items(self, category, **kwargs):
"""Fetch the questions
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Looking for questions at site '%s', with tag '%s' and updated from '%s'",
self.site, self.tagged, str(from_date))
whole_pages = self.client.get_questions(from_date)
for whole_page in whole_pages:
questions = self.parse_questions(whole_page)
for question in questions:
yield question
@classmethod
def has_archiving(cls):
"""Returns whether it supports archiving items on the fetch process.
:returns: this backend supports items archive
"""
return True
@classmethod
def has_resuming(cls):
"""Returns whether it supports to resume the fetch process.
:returns: this backend supports items resuming
"""
return True
@staticmethod
def metadata_id(item):
"""Extracts the identifier from a StackExchange item."""
return str(item['question_id'])
@staticmethod
def metadata_updated_on(item):
"""Extracts the update time from a StackExchange item.
The timestamp is extracted from 'last_activity_date' field.
This date is a UNIX timestamp but needs to be converted to
a float value.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
return float(item['last_activity_date'])
@staticmethod
def metadata_category(item):
    """Extracts the category from a StackExchange item.

    This backend only generates one type of item which is
    'question', so the category is constant regardless of *item*.
    """
    return CATEGORY_QUESTION
@staticmethod
def parse_questions(raw_page):
"""Parse a StackExchange API raw response.
The method parses the API response retrieving the
questions from the received items
:param items: items from where to parse the questions
:returns: a generator of questions
"""
raw_questions = json.loads(raw_page)
questions = raw_questions['items']
for question in questions:
yield question
def _init_client(self, from_archive=False):
    """Init client

    Builds the StackExchangeClient used by fetch_items(); the
    archive flags control whether responses are stored or replayed.
    """
    return StackExchangeClient(self.site, self.tagged, self.api_token, self.max_questions,
                               self.archive, from_archive)
class StackExchangeClient(HttpClient):
    """StackExchange API client.

    This class implements a simple client to retrieve questions from
    any Stackexchange site.

    :param site: URL of the StackExchange site
    :param tagged: filter items by question Tag
    :param token: StackExchange access_token for the API
    :param max_questions: max number of questions per query
    :param archive: an archive to store/read fetched data
    :param from_archive: it tells whether to write/read the archive

    :raises HTTPError: when an error occurs doing the request
    """
    # Filters are immutable and non-expiring. This filter allows to retrieve all
    # the information regarding Each question. To know more, visit
    # https://api.stackexchange.com/docs/questions and paste the filter in the
    # whitebox filter. It will display a list of checkboxes with the selected
    # values for the filter provided.
    QUESTIONS_FILTER = 'Bf*y*ByQD_upZqozgU6lXL_62USGOoV3)MFNgiHqHpmO_Y-jHR'

    STACKEXCHANGE_API_URL = 'https://api.stackexchange.com'
    VERSION_API = '2.2'

    def __init__(self, site, tagged, token, max_questions=MAX_QUESTIONS, archive=None, from_archive=False):
        super().__init__(self.STACKEXCHANGE_API_URL, archive=archive, from_archive=from_archive)
        self.site = site
        self.tagged = tagged
        self.token = token
        self.max_questions = max_questions

    def get_questions(self, from_date):
        """Retrieve all the questions from a given date.

        Pages through the API's /questions endpoint, yielding the raw
        JSON text of each page.  Sleeps when the API returns a
        'backoff' hint before requesting the next page.

        :param from_date: obtain questions updated since this date
        """
        page = 1

        url = urijoin(self.base_url, self.VERSION_API, "questions")
        req = self.fetch(url, payload=self.__build_payload(page, from_date))

        questions = req.text
        data = req.json()
        tquestions = data['total']
        nquestions = data['page_size']

        self.__log_status(data['quota_remaining'],
                          data['quota_max'],
                          nquestions,
                          tquestions)

        # Yield the current page, then fetch the next one (if any) only
        # after the caller has consumed it; `questions = None` ends the
        # loop when 'has_more' is false.
        while questions:
            yield questions
            questions = None
            if data['has_more']:
                page += 1

                backoff = data.get('backoff', None)
                if backoff:
                    logger.debug("Expensive query. Wait %s secs to send a new request",
                                 backoff)
                    time.sleep(float(backoff))

                req = self.fetch(url, payload=self.__build_payload(page, from_date))
                data = req.json()
                questions = req.text
                nquestions += data['page_size']
                self.__log_status(data['quota_remaining'],
                                  data['quota_max'],
                                  nquestions,
                                  tquestions)

    @staticmethod
    def sanitize_for_archive(url, headers, payload):
        """Sanitize payload of a HTTP request by removing the token information
        before storing/retrieving archived items

        :param: url: HTTP url request
        :param: headers: HTTP headers request
        :param: payload: HTTP payload request

        :returns url, headers and the sanitized payload
        """
        if 'key' in payload:
            # NOTE(review): removes 'key' from the caller's dict in place;
            # callers appear not to reuse the payload afterwards -- confirm.
            payload.pop('key')

        return url, headers, payload

    def __build_payload(self, page, from_date, order='desc', sort='activity'):
        # Build the query-string parameters for one /questions request.
        payload = {'page': page,
                   'pagesize': self.max_questions,
                   'order': order,
                   'sort': sort,
                   'tagged': self.tagged,
                   'site': self.site,
                   'key': self.token,
                   'filter': self.QUESTIONS_FILTER}
        if from_date:
            # The API expects an epoch timestamp in the 'min' parameter
            timestamp = int(from_date.timestamp())
            payload['min'] = timestamp
        return payload

    def __log_status(self, quota_remaining, quota_max, page_size, total):
        # Report remaining API quota and overall fetch progress.
        logger.debug("Rate limit: %s/%s" % (quota_remaining,
                                            quota_max))
        if (total != 0):
            nquestions = min(page_size, total)
            logger.info("Fetching questions: %s/%s" % (nquestions,
                                                       total))
        else:
            logger.info("No questions were found.")
class StackExchangeCommand(BackendCommand):
    """Class to run StackExchange backend from the command line."""

    BACKEND = StackExchange

    @staticmethod
    def setup_cmd_parser():
        """Returns the StackExchange argument parser.

        Extends the generic backend parser (from-date, token and
        archive options) with StackExchange-specific arguments.
        """
        parser = BackendCommandArgumentParser(from_date=True,
                                              token_auth=True,
                                              archive=True)

        # StackExchange options
        group = parser.parser.add_argument_group('StackExchange arguments')
        group.add_argument('--site', dest='site',
                           required=True,
                           help="StackExchange site")
        group.add_argument('--tagged', dest='tagged',
                           help="filter items by question Tag")
        group.add_argument('--max-questions', dest='max_questions',
                           type=int, default=MAX_QUESTIONS,
                           help="Maximum number of questions requested in the same query")

        return parser
| valeriocos/perceval | perceval/backends/core/stackexchange.py | Python | gpl-3.0 | 10,898 | [
"VisIt"
] | 1717aba5de58093a7cf1e003c490a92ce7fad78f6be6be14320ca1a7ea1a4bc5 |
#!/usr/bin/env python
import sys
import os
import subprocess
import pysam
import textwrap
import argparse
def print_header():
    """Print the VCF 4.1 header describing bamsurgeon truth calls.

    Uses the ``print()`` call form so the script works under both
    Python 2 and Python 3 (the original ``print`` statement was
    Python-2 only).
    """
    print(textwrap.dedent("""\
    ##fileformat=VCFv4.1
    ##phasing=none
    ##INDIVIDUAL=TRUTH
    ##SAMPLE=<ID=TRUTH,Individual="TRUTH",Description="bamsurgeon spike-in">
    ##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">
    ##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">
    ##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">
    ##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">
    ##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic mutation in primary">
    ##INFO=<ID=VAF,Number=1,Type=Float,Description="Variant Allele Frequency">
    ##INFO=<ID=DPR,Number=1,Type=Float,Description="Avg Depth in Region (+/- 1bp)">
    ##INFO=<ID=MATEID,Number=1,Type=String,Description="Breakend mate">
    ##ALT=<ID=INV,Description="Inversion">
    ##ALT=<ID=DUP,Description="Duplication">
    ##ALT=<ID=DEL,Description="Deletion">
    ##ALT=<ID=INS,Description="Insertion">
    ##ALT=<ID=IGN,Description="Ignore SNVs in Interval">
    ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
    #CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSPIKEIN"""))
def printvcf(chrom, bnd1, bnd2, precise, type, svlen, ref):
    """Print one VCF record for a structural variant as a symbolic ALT.

    :param chrom: chromosome name
    :param bnd1: left breakend position
    :param bnd2: right breakend position (written as the END INFO field)
    :param precise: if False, IMPRECISE plus +/-100bp CIPOS/CIEND are added
    :param type: SV type string ('ins', 'del', 'inv', 'dup', 'IGN')
    :param svlen: SV length written to the SVLEN INFO field
    :param ref: indexed reference exposing fetch(chrom, start, end)

    Uses print() so the script runs under Python 2 and Python 3.
    """
    base1 = ref.fetch(chrom, bnd1, bnd1 + 1)
    # base2 is fetched but not written yet; kept for the planned breakend output
    base2 = ref.fetch(chrom, bnd2, bnd2 + 1)
    alt = '<' + type.upper() + '>'

    info = []
    if not precise:
        info.append('IMPRECISE')
        info.append('CIPOS=-100,100')
        info.append('CIEND=-100,100')
    info.append('SOMATIC')
    info.append('SVTYPE=' + type.upper())
    info.append('END=' + str(bnd2))
    info.append('SVLEN=' + str(svlen))
    infostr = ';'.join(info)

    print('\t'.join((chrom, str(bnd1), '.', base1, alt, '100', 'PASS', infostr, 'GT', './.')))

    # TODO: emit paired breakend (BND) records instead of symbolic ALTs.
    # This needs per-SV-type handling; sketch for deletions (was a dead
    # triple-quoted string in the original):
    #   id1 = '_'.join((type, str(n), 'A'))
    #   id2 = '_'.join((type, str(n), 'B'))
    #   alt1 = base2 + '[' + chrom + ':' + str(bnd2) + '['
    #   alt2 = base1 + '[' + chrom + ':' + str(bnd1) + '['
def precise_interval(mutline, ref):
    """Emit one precise VCF record for an addsv.py log line.

    :param mutline: whitespace-separated log line (type, chrom, refstart,
        refend, ..., contig coords in columns 7/8)
    :param ref: indexed reference handed through to printvcf
    """
    fields = mutline.split()
    svtype = fields[0]
    chrom = fields[1]
    refstart = int(fields[2])
    int(fields[3])  # refend: parsed for validation only, unused below

    if svtype == 'ins':
        # insertions occupy a single contig base
        contigstart = int(fields[6])
        contigend = contigstart + 1
    else:
        contigstart = int(fields[6])
        contigend = int(fields[7])

    bnd1 = refstart + contigstart
    bnd2 = refstart + contigend
    assert bnd1 < bnd2
    printvcf(chrom, bnd1, bnd2, True, svtype, bnd2 - bnd1, ref)
def ignore_interval(mutline, ref):
    """Emit an IGN record covering the whole reference interval of a log line.

    Used to mask accidental SNVs inside the contig region when several
    mutation types are combined in one BAM.
    """
    fields = mutline.split()
    chrom = fields[1]
    refstart = int(fields[2])
    refend = int(fields[3])
    assert refstart < refend
    printvcf(chrom, refstart, refend, True, 'IGN', refend - refstart, ref)
def main(args):
    """Print the VCF header, then one record per SV line in the log.

    :param args: parsed command-line arguments (ref, log, mask)
    """
    print_header()
    ref = pysam.Fastafile(args.ref)
    sv_types = ('ins', 'del', 'inv', 'dup')

    with open(args.log, 'r') as log:
        for line in log:
            if not line.startswith(sv_types):
                continue
            record = line.strip()
            precise_interval(record, ref)
            if args.mask:
                ignore_interval(record, ref)
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run main().
    parser = argparse.ArgumentParser(description="make VCF 'truth' file given log file (hint: concatenate them) from addsv.py")
    parser.add_argument('-r', '--ref', dest='ref', required=True, help="reference indexed with samtools faidx")
    parser.add_argument('-l', '--log', dest='log', required=True, help="log file from addsv.py")
    parser.add_argument('--mask', action="store_true", default=False, help="output contig intervals, used to mask accidental SNVs if combining mutation types in one BAM")
    args = parser.parse_args()
    main(args)
| chaelir/bamsurgeon | etc/makevcf_sv.py | Python | mit | 4,073 | [
"pysam"
] | e58ad491aaa04415ee07aeef58dd4c915bb67de346770a4d4dc2b75ffb130810 |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
display = Display()
# Sentinel byte strings substituted into old-style ("module replacer")
# module sources at build time; each is matched as raw bytes.
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"

# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'

# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
%(rlimit)s
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
import coverage
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
# Select the wrapper template once at import time, based on configuration.
if C.DEFAULT_KEEP_REMOTE_FILES:
    # Keep comments when KEEP_REMOTE_FILES is set.  That way users will see
    # the comments with some nice usage instructions
    ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
    # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
    ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
    """AST visitor that records which ansible.module_utils names a module imports.

    Caveats:
    * Explicit relative imports (``from . import urls``) are recognized and
      ignored rather than resolved (they have ``node.module is None``).
    """
    # Length of the 'ansible.module_utils.' prefix, stripped from import names
    IMPORT_PREFIX_SIZE = len('ansible.module_utils.')

    def __init__(self, *args, **kwargs):
        """
        Walk the ast tree for the python module.

        Save submodule[.submoduleN][.identifier] into self.submodules

        self.submodules will end up with tuples like:
          - ('basic',)
          - ('urls', 'fetch_url')
          - ('database', 'postgres')
          - ('database', 'postgres', 'quote')

        It's up to calling code to determine whether the final element of the
        dotted strings are module names or something else (function, class, or
        variable names)
        """
        super(ModuleDepFinder, self).__init__(*args, **kwargs)
        self.submodules = set()

    def visit_Import(self, node):
        # import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
        for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
            py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
            py_mod = tuple(py_mod.split('.'))
            self.submodules.add(py_mod)
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        # Specialcase: six is a special case because of its
        # import logic
        if node.names[0].name == '_six':
            self.submodules.add(('_six',))
        # BUGFIX: node.module is None for explicit relative imports
        # ("from . import x"); guard before calling startswith() so the
        # visitor does not raise AttributeError on such modules.
        elif node.module and node.module.startswith('ansible.module_utils'):
            where_from = node.module[self.IMPORT_PREFIX_SIZE:]
            if where_from:
                # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
                # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
                # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
                py_mod = tuple(where_from.split('.'))
                for alias in node.names:
                    self.submodules.add(py_mod + (alias.name,))
            else:
                # from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
                for alias in node.names:
                    self.submodules.add((alias.name,))
        self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
    """
    Note not stellar API:
       Returns None instead of always returning a shebang line.  Doing it this
       way allows the caller to decide to use the shebang it read from the
       file rather than trust that we reformatted what they already have
       correctly.

    :param interpreter: interpreter path requested by the module
    :param task_vars: task variables; consulted for per-host interpreter overrides
    :param templar: templar used to expand templated interpreter values
    :param args: extra arguments appended to the shebang line
    :returns: tuple of (shebang line or None, resolved interpreter path)
    :raises InterpreterDiscoveryRequiredError: when discovery is configured
        but has not yet run for this host
    """
    interpreter_name = os.path.basename(interpreter).strip()

    # FUTURE: add logical equivalence for python3 in the case of py3-only modules

    # check for first-class interpreter config
    interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()

    if C.config.get_configuration_definitions().get(interpreter_config_key):
        # a config def exists for this interpreter type; consult config for the value
        interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
        discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name

        interpreter_out = templar.template(interpreter_out.strip())

        facts_from_task_vars = task_vars.get('ansible_facts', {})

        # handle interpreter discovery if requested
        if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
            if discovered_interpreter_config not in facts_from_task_vars:
                # interpreter discovery is desired, but has not been run for this host
                raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
                                                        interpreter_name=interpreter_name,
                                                        discovery_mode=interpreter_out)
            else:
                interpreter_out = facts_from_task_vars[discovered_interpreter_config]
    else:
        # a config def does not exist for this interpreter type; consult vars for a possible direct override
        interpreter_config = u'ansible_%s_interpreter' % interpreter_name

        if interpreter_config not in task_vars:
            # no override: tell the caller to keep whatever shebang it read
            return None, interpreter

        interpreter_out = templar.template(task_vars[interpreter_config].strip())

    shebang = u'#!' + interpreter_out

    if args:
        shebang = shebang + u' ' + u' '.join(args)

    return shebang, interpreter_out
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
    """
    Using ModuleDepFinder, make sure we have all of the module_utils files
    that the module and its module_utils files need.

    :param name: name of the python module being scanned (for error messages)
    :param data: source text of that module
    :param py_module_names: set of module-name tuples already written to the
        zipfile; updated in place
    :param py_module_cache: mapping of module-name tuple -> (source, path);
        updated in place
    :param zf: open ZipFile the module_utils sources are written into
    """
    # Parse the module and find the imports of ansible.module_utils
    try:
        tree = ast.parse(data)
    except (SyntaxError, IndentationError) as e:
        raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))

    finder = ModuleDepFinder()
    finder.visit(tree)

    #
    # Determine what imports that we've found are modules (vs class, function.
    # variable names) for packages
    #

    normalized_modules = set()
    # Loop through the imports that we've found to normalize them
    # Exclude paths that match with paths we've already processed
    # (Have to exclude them a second time once the paths are processed)
    module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
    module_utils_paths.append(_MODULE_UTILS_PATH)

    for py_module_name in finder.submodules.difference(py_module_names):
        module_info = None

        if py_module_name[0] == 'six':
            # Special case the python six library because it messes up the
            # import process in an incompatible way
            module_info = imp.find_module('six', module_utils_paths)
            py_module_name = ('six',)
            idx = 0
        elif py_module_name[0] == '_six':
            # Special case the python six library because it messes up the
            # import process in an incompatible way
            module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
            py_module_name = ('six', '_six')
            idx = 0
        else:
            # Check whether either the last or the second to last identifier is
            # a module name
            for idx in (1, 2):
                if len(py_module_name) < idx:
                    break
                try:
                    module_info = imp.find_module(py_module_name[-idx],
                                                  [os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
                    break
                except ImportError:
                    continue

        # Could not find the module. Construct a helpful error message.
        if module_info is None:
            msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
            if idx == 2:
                msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
            else:
                msg.append(py_module_name[-1])
            raise AnsibleError(' '.join(msg))

        # Found a byte compiled file rather than source. We cannot send byte
        # compiled over the wire as the python version might be different.
        # imp.find_module seems to prefer to return source packages so we just
        # error out if imp.find_module returns byte compiled files (This is
        # fragile as it depends on undocumented imp.find_module behaviour)
        if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
            msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
            if idx == 2:
                msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
            else:
                msg.append(py_module_name[-1])
            raise AnsibleError(' '.join(msg))

        if idx == 2:
            # We've determined that the last portion was an identifier and
            # thus, not part of the module name
            py_module_name = py_module_name[:-1]

        # If not already processed then we've got work to do
        # If not in the cache, then read the file into the cache
        # We already have a file handle for the module open so it makes
        # sense to read it now
        if py_module_name not in py_module_cache:
            if module_info[2][2] == imp.PKG_DIRECTORY:
                # Read the __init__.py instead of the module file as this is
                # a python package
                normalized_name = py_module_name + ('__init__',)
                if normalized_name not in py_module_names:
                    # NOTE(review): nested os.path.join is redundant but harmless
                    normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py'))
                    normalized_data = _slurp(normalized_path)
                    py_module_cache[normalized_name] = (normalized_data, normalized_path)
                    normalized_modules.add(normalized_name)
            else:
                normalized_name = py_module_name
                if normalized_name not in py_module_names:
                    normalized_path = module_info[1]
                    normalized_data = module_info[0].read()
                    module_info[0].close()
                    py_module_cache[normalized_name] = (normalized_data, normalized_path)
                    normalized_modules.add(normalized_name)

            # Make sure that all the packages that this module is a part of
            # are also added
            for i in range(1, len(py_module_name)):
                py_pkg_name = py_module_name[:-i] + ('__init__',)
                if py_pkg_name not in py_module_names:
                    pkg_dir_info = imp.find_module(py_pkg_name[-1],
                                                   [os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
                    normalized_modules.add(py_pkg_name)
                    py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])

    # FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
    # variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
    # traceback when it tries to monkypatch. So, for now, we have to unconditionally include
    # basic.py.
    #
    # In the future we need to change the wrapper to monkeypatch the args into a global variable in
    # their own, separate python module. That way we won't require basic.py. Modules which don't
    # want basic.py can import that instead. AnsibleModule will need to change to import the vars
    # from the separate python module and mirror the args into its global variable for backwards
    # compatibility.
    if ('basic',) not in py_module_names:
        pkg_dir_info = imp.find_module('basic', module_utils_paths)
        normalized_modules.add(('basic',))
        py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
    # End of AnsiballZ hack

    #
    # iterate through all of the ansible.module_utils* imports that we haven't
    # already checked for new imports
    #

    # set of modules that we haven't added to the zipfile
    unprocessed_py_module_names = normalized_modules.difference(py_module_names)

    for py_module_name in unprocessed_py_module_names:
        py_module_path = os.path.join(*py_module_name)
        py_module_file_name = '%s.py' % py_module_path

        zf.writestr(os.path.join("ansible/module_utils",
                    py_module_file_name), py_module_cache[py_module_name][0])
        display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])

    # Add the names of the files we're scheduling to examine in the loop to
    # py_module_names so that we don't re-examine them in the next pass
    # through recursive_finder()
    py_module_names.update(unprocessed_py_module_names)

    for py_module_file in unprocessed_py_module_names:
        recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
        # Save memory; the file won't have to be read again for this ansible module.
        del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
                       become_method, become_user, become_password, become_flags, environment):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.

    :arg module_name: name of the module as invoked by the task
    :arg b_module_data: raw bytes of the module source as read from disk
    :arg module_args: dict of arguments to embed into the module payload
    :arg module_compression: name of a ``zipfile`` compression constant
        (e.g. ``'ZIP_STORED'``, ``'ZIP_DEFLATED'``)
    :returns: tuple of ``(b_module_data, module_style, shebang)`` where
        ``module_style`` is one of ``old``, ``new``, ``binary`` or
        ``non_native_want_json`` and ``shebang`` may be ``None``.
    """
    module_substyle = module_style = 'old'
    # module_style is something important to calling code (ActionBase). It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally. It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ansiballz to format the module itself.
    # NOTE: the branch ORDER below is significant -- the first marker found
    # wins, so REPLACER must be checked before the module_utils import scan.
    if _is_binary(b_module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need make sure
        # we substitute "from ansible.module_utils basic" for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in b_module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in b_module_data:
        module_style = 'new'
        module_substyle = 'powershell'
        b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
    elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
            or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
            or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
            or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in b_module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in b_module_data:
        module_substyle = module_style = 'non_native_want_json'
    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return b_module_data, module_style, shebang
    output = BytesIO()
    py_module_names = set()
    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        try:
            python_repred_params = repr(json.dumps(params))
        except TypeError as e:
            raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
            compression_method = zipfile.ZIP_STORED
        # On-disk cache of the assembled AnsiballZ zip payload, keyed by
        # module name and compression method.
        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
        cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
            with open(cached_module_filename, 'rb') as module_data:
                zipdata = module_data.read()
        else:
            if module_name in action_write_locks.action_write_locks:
                display.debug('ANSIBALLZ: Using lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module.  Use the "unexpected
                # module" lock instead
                display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[None]
            display.debug('ANSIBALLZ: Acquiring lock')
            with lock:
                display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ANSIBALLZ: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
                    # Note: If we need to import from release.py first,
                    # remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
                    zf.writestr('ansible/__init__.py',
                                b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
                                to_bytes(__version__) + b'"\n__author__="' +
                                to_bytes(__author__) + b'"\n')
                    zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
                    zf.writestr('__main__.py', b_module_data)
                    py_module_cache = {('__init__',): (b'', '[builtin]')}
                    recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())
                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    if not os.path.exists(lookup_path):
                        # Note -- if we have a global function to setup, that would
                        # be a better place to run this
                        os.makedirs(lookup_path)
                    display.debug('ANSIBALLZ: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)
                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ANSIBALLZ: Renaming module')
                    os.rename(cached_module_filename + '-part', cached_module_filename)
                    display.debug('ANSIBALLZ: Done creating module')
            if zipdata is None:
                display.debug('ANSIBALLZ: Reading module after lock')
                # Another process wrote the file while we were waiting for
                # the write lock.  Go ahead and read the data from disk
                # instead of re-creating it.
                try:
                    with open(cached_module_filename, 'rb') as f:
                        zipdata = f.read()
                except IOError:
                    raise AnsibleError('A different worker process failed to create module file. '
                                       'Look at traceback for that process for debugging information.')
        zipdata = to_text(zipdata, errors='surrogate_or_strict')
        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
        if shebang is None:
            shebang = u'#!/usr/bin/python'
        # Enclose the parts of the interpreter in quotes because we're
        # substituting it into the template as a Python string
        interpreter_parts = interpreter.split(u' ')
        interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
        # FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
        rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
        if not isinstance(rlimit_nofile, int):
            rlimit_nofile = int(templar.template(rlimit_nofile))
        if rlimit_nofile:
            rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
                rlimit_nofile=rlimit_nofile,
            )
        else:
            rlimit = ''
        coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
        if coverage_config:
            # Enable code coverage analysis of the module.
            # This feature is for internal testing and may change without notice.
            coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
                coverage_config=coverage_config,
                coverage_output=os.environ['_ANSIBLE_COVERAGE_OUTPUT']
            )
        else:
            coverage = ''
        # Fill the AnsiballZ wrapper template with the payload and metadata.
        now = datetime.datetime.utcnow()
        output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,
            params=python_repred_params,
            shebang=shebang,
            interpreter=interpreter,
            coding=ENCODING_STRING,
            year=now.year,
            month=now.month,
            day=now.day,
            hour=now.hour,
            minute=now.minute,
            second=now.second,
            coverage=coverage,
            rlimit=rlimit,
        )))
        b_module_data = output.getvalue()
    elif module_substyle == 'powershell':
        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here.  If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'
        # create the common exec wrapper payload and set that as the module_data
        # bytes
        b_module_data = ps_manifest._create_powershell_wrapper(
            b_module_data, module_args, environment, async_timeout, become,
            become_method, become_user, become_password, become_flags,
            module_substyle
        )
    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))
        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz) If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
        b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
        # The main event -- substitute the JSON args string into the module
        b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
        # Redirect the module's syslog facility if the inventory overrides it
        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
        b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
    return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
                  become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports.  This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.
    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.
    Example:
    from ansible.module_utils.basic import *
    ... will result in the insertion of basic.py into the module
    from the module_utils/ directory in the source tree.
    For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
    properties not available here.

    :returns: tuple of ``(b_module_data, module_style, shebang)``; for
        non-binary modules with no wrapper-supplied shebang, the module's
        own ``#!`` line is rewritten via ``_get_shebang()``.
    """
    task_vars = {} if task_vars is None else task_vars
    environment = {} if environment is None else environment
    with open(module_path, 'rb') as f:
        # read in the module source
        b_module_data = f.read()
    # Delegate style detection and payload assembly to _find_module_utils.
    (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
                                                                async_timeout=async_timeout, become=become, become_method=become_method,
                                                                become_user=become_user, become_password=become_password, become_flags=become_flags,
                                                                environment=environment)
    if module_style == 'binary':
        return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
    elif shebang is None:
        # Only split off the first line -- the rest of the module body is
        # left untouched.
        b_lines = b_module_data.split(b"\n", 1)
        if b_lines[0].startswith(b"#!"):
            b_shebang = b_lines[0].strip()
            # shlex.split on python-2.6 needs bytes. On python-3.x it needs text
            args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
            # _get_shebang() takes text strings
            args = [to_text(a, errors='surrogate_or_strict') for a in args]
            interpreter = args[0]
            b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
                                     errors='surrogate_or_strict', nonstring='passthru')
            if b_new_shebang:
                b_lines[0] = b_shebang = b_new_shebang
            if os.path.basename(interpreter).startswith(u'python'):
                # Python modules get an explicit coding line inserted after
                # the shebang.
                b_lines.insert(1, b_ENCODING_STRING)
            shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
        else:
            # No shebang, assume a binary module?
            pass
        b_module_data = b"\n".join(b_lines)
    return (b_module_data, module_style, shebang)
| ujenmr/ansible | lib/ansible/executor/module_common.py | Python | gpl-3.0 | 44,052 | [
"VisIt"
] | 25dac55812d002e164c32033ee594e48b086010dfed7032042466886c1bb9708 |
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import pandas as pd
import pymc3 as pm
import seaborn as sns
import numba
from scipy import integrate
from .binaries import make_systems
from wisps.utils.tools import get_distance
from tqdm import tqdm
import wisps
import wisps.simulations as wispsim
#constant distance
EUCLID_SOUTH=SkyCoord(l=24.6*u.deg, b=-82.0*u.deg , frame='galactic').galactic
EUCLID_NORTH=SkyCoord("18:0:0 66:33:0", obstime="J2000", unit=u.deg).galactic
EUCLID_FORNAX=SkyCoord("3:32:28.0 -27:48:30" , obstime="J2000", unit=u.deg).galactic
#mag limits
EUCLID_MAG_LIMITS={'J': 27., 'H': 27.}
#absol=#wisps.absolute_magnitude_jh(wispsim.SPGRID)[1]
#RELJ=wisps.POLYNOMIAL_RELATIONS['abs_mags']['EUCLID_J']
RELH=wisps.POLYNOMIAL_RELATIONS['abs_mags']['EUCLID_H']
absol=(RELH[0])(np.random.normal(wispsim.SPGRID, RELH[1]))
DMAXS=dict(zip(wispsim.SPGRID, (wisps.get_distance(absol, np.ones_like(absol)*EUCLID_MAG_LIMITS['H']))))
#constants
Rsun=wispsim.Rsun
Zsun=wispsim.Zsun
def distance_sampler(l, b, nsample=1000, h=300, dmax=1000):
    """
    Sample heliocentric distances along a galactic line of sight (l, b)
    from the galactic density profile with scale height h, using MCMC.

    Args:
        l: galactic longitude, in radians
        b: galactic latitude, in radians
        nsample: number of MCMC draws to keep
        h: disk scale height passed to wispsim.density_function
        dmax: upper bound on the sampled distance
    Returns:
        pymc3 trace; trace['d'] holds the sampled distances.
    """
    # log-probability: volume element d**2 times the galactic density at (r, z)
    def logp(l, b, r, z, d, h):
        return np.log((d**2)*wispsim.density_function(r, z, h))
    with pm.Model() as model:
        d=pm.Uniform('d', lower=0., upper=dmax, testval=10.,)
        # heliocentric -> galactocentric coordinates (Rsun, Zsun are the
        # solar position constants from wispsim)
        x=pm.Deterministic('x', Rsun-d*np.cos(b)*np.cos(l))
        y=pm.Deterministic('y', -d*np.cos(b)*np.sin(l))
        r=pm.Deterministic('r', (x**2+y**2)**0.5 )
        z=pm.Deterministic('z', Zsun+ d * np.sin(b))
        like = pm.DensityDist('likelihood', logp, observed={'l':l, 'b':b,
                                                            'r': r, 'z': z, 'd':d, 'h':h})
        # Metropolis sampling; 5% of the draws are used for tuning and dropped
        trace = pm.sample(draws=int(nsample), cores=4, step=pm.Metropolis(), tune=int(nsample/20), discard_tuned_samples=True)
    return trace
@np.vectorize
def euclid_selection_function(j, h):
    """Step-function selection based on the Euclid magnitude cuts.

    Returns 1.0 when the source is brighter than (i.e. below) the survey
    magnitude limit in either band, J or H, and 0.0 otherwise.
    """
    brighter_than_j_limit = j < EUCLID_MAG_LIMITS['J']
    brighter_than_h_limit = h < EUCLID_MAG_LIMITS['H']
    return 1. if (brighter_than_j_limit or brighter_than_h_limit) else 0.
def expected_numbers(model, field='fornax', h=300):
    # Compute the expected numbers of sources in a Euclid field for the
    # given evolutionary model and scale height h, by simulating systems,
    # assigning them distances from the galactic density profile and
    # applying the magnitude-cut selection function.
    #spectral type
    syst=make_systems(model_name=model, bfraction=0.2)
    sortedindx=np.argsort((syst['system_spts']).flatten())
    spts=((syst['system_spts']).flatten())[sortedindx]
    # round to the nearest spectral-type bin (kept as float so the
    # equality test against the DMAXS keys below works elementwise)
    round_spts=np.round(spts).astype(float).flatten()
    print (round_spts.shape)
    #distances
    dists=None
    ds=np.zeros(len(spts))
    coordinate_field=None
    if field=='fornax':
        coordinate_field=EUCLID_FORNAX
    if field=='south':
        coordinate_field=EUCLID_SOUTH
    if field=='north':
        coordinate_field=EUCLID_NORTH
    for k in DMAXS.keys():
        # sample distances out to the detection limit DMAXS[k] for this
        # spectral type, then draw one distance per matching system
        trace=distance_sampler(coordinate_field.l.radian, coordinate_field.b.radian, dmax=DMAXS[k], nsample=1000, h=h)
        indx= (round_spts==k)
        ds[indx]=np.random.choice(trace['d'].flatten(), len(round_spts[indx]))
    absjs, abshs=wisps.absolute_magnitude_jh(spts)
    dists=ds
    # apparent magnitudes via the distance modulus
    appjs=absjs+5*np.log10(dists/10.0)
    apphs=abshs+5*np.log10(dists/10.0)
    #selection probabilities
    s=euclid_selection_function(appjs, apphs)
    #teffs are for normalizing the LF
    return {'spt': spts, 'ds': dists, 'j':appjs, 'h':apphs, 'prob': s, 'teff': ((syst['system_teff']).flatten())[sortedindx]}
"Galaxy"
] | e3ee3958182bfdf2a6b14ff2f6d088663a612f1845727873b042e9518de6751b |
"""
In this program we study the Lax-Wendroff method and its convergence.
Marina von Steinkirch, spring/2013
Based on Mike Zingale's code 2nd-order accurate finite-volume implementation
of linear advection with piecewise linear slope reconstruction
"""
import numpy
import pylab
import math
class ccFVgrid:
    """Cell-centered finite-volume grid with ghost cells and storage for
    a single advected quantity ``a``."""

    def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
        self.xmin = xmin
        self.xmax = xmax
        self.ng = ng
        self.nx = nx
        # indices of the first and last "real" (non-ghost) zones
        self.ilo = ng
        self.ihi = ng + nx - 1
        # cell-centered coordinates plus left/right edge coordinates
        self.dx = (xmax - xmin) / (nx)
        zone_index = numpy.arange(nx + 2 * ng) - ng
        self.x = xmin + (zone_index + 0.5) * self.dx
        self.xl = xmin + zone_index * self.dx
        self.xr = xmin + (zone_index + 1.0) * self.dx
        # storage for the solution
        self.a = numpy.zeros((nx + 2 * ng), dtype=numpy.float64)

    def period(self, u):
        """Time for advection speed u to cross the domain once."""
        return (self.xmax - self.xmin) / u

    def scratchArray(self):
        """Return a zeroed array shaped like the full (ghost-padded) grid."""
        return numpy.zeros((self.nx + 2 * self.ng), dtype=numpy.float64)

    def fillBCs(self):
        """Fill every ghost cell using periodic boundary conditions."""
        for n in range(self.ng):
            # left ghosts mirror the right end of the real domain ...
            self.a[self.ilo - 1 - n] = self.a[self.ihi - n]
            # ... and right ghosts mirror the left end
            self.a[self.ihi + 1 + n] = self.a[self.ilo + n]

    def initCond(self, type="gaussian"):
        """Set the initial condition: "tophat", "sine" or "gaussian"."""
        if type == "tophat":
            in_hat = numpy.logical_and(self.x >= 0.333, self.x <= 0.666)
            self.a[in_hat] = 1.0
        elif type == "sine":
            domain_width = self.xmax - self.xmin
            self.a[:] = numpy.sin(2.0 * math.pi * self.x / domain_width)
        elif type == "gaussian":
            self.a[:] = 1.0 + numpy.exp(-((self.x - 0.5) ** 2) / 0.1 ** 2)
        # keep a copy of the initial state for later error measurement
        self.ainit = self.a.copy()

    def norm(self, e):
        """Grid (L2) norm of a quantity e defined on this grid, or None if
        e is not shaped like the full grid."""
        if len(e) != 2 * self.ng + self.nx:
            return None
        interior = e[self.ilo:self.ihi + 1]
        return numpy.sqrt(self.dx * numpy.sum(interior ** 2))
#-----------------------------------------------------------------------------
# advection-specific routines
def timestep(g, C, u):
    """Return the CFL-limited timestep dt = C * dx / u for grid g and
    advection speed u."""
    dt = C * g.dx / u
    return dt
def states(g, dt, u):
    """Compute left and right interface states from piecewise-linear
    reconstruction of g.a, traced forward by dt/2 at speed u."""
    # centered-difference slope in each zone, including one ghost zone on
    # either side of the valid region
    slope = g.scratchArray()
    for izone in range(g.ilo - 1, g.ihi + 2):
        slope[izone] = 0.5*(g.a[izone+1] - g.a[izone-1])/g.dx
    # interface states -- index i labels the LEFT edge of zone i, so there
    # is one more interface than there are zones
    al = g.scratchArray()
    ar = g.scratchArray()
    for iface in range(g.ilo, g.ihi + 2):
        # left state on the interface comes from zone i-1
        al[iface] = g.a[iface-1] + 0.5*g.dx*(1.0 - u*dt/g.dx)*slope[iface-1]
        # right state on the interface comes from zone i
        ar[iface] = g.a[iface] - 0.5*g.dx*(1.0 + u*dt/g.dx)*slope[iface]
    return al, ar
def riemann(u, al, ar):
    """Riemann problem for linear advection: simple upwinding.

    Picks the state on the upwind side of each interface and returns the
    corresponding flux u * a.
    """
    upwind_state = al if u > 0.0 else ar
    return u * upwind_state
def update(g, dt, flux):
    """Conservative finite-volume update:
    a_new = a + dt/dx * (F_left - F_right), applied to the real zones only."""
    lo, hi = g.ilo, g.ihi
    anew = g.scratchArray()
    inflow = flux[lo:hi+1]
    outflow = flux[lo+1:hi+2]
    anew[lo:hi+1] = g.a[lo:hi+1] + dt/g.dx * (inflow - outflow)
    return anew
def evolve(nx, C, u, numPeriods, ICname):
    """Advect the initial condition ICname for numPeriods periods on an
    nx-zone grid at CFL number C and speed u; return the evolved grid."""
    ng = 2
    # create the grid
    g = ccFVgrid(nx, ng)
    t = 0.0
    tmax = numPeriods*g.period(u)
    # initialize the data
    g.initCond(ICname)
    # main evolution loop
    while (t < tmax):
        # fill the boundary conditions
        g.fillBCs()
        # get the timestep
        dt = timestep(g, C, u)
        if (t + dt > tmax):
            dt = tmax - t
        # get the interface states
        al, ar = states(g, dt, u)
        # solve the Riemann problem at all interfaces
        flux = riemann(u, al, ar)
        # do the conservative update
        # NOTE(review): the flux-based conservative update is disabled below;
        # the inner loop applies the Lax-Wendroff stencil directly instead,
        # so al/ar/flux computed above are currently unused.
        #anew = update(g, dt, flux)
        anew = g.scratchArray()
        i = g.ilo
        while (i <= g.ihi):
            # Lax-Wendroff update: centered advection term plus the
            # 0.5*C**2 second-difference correction
            #
            anew[i] = g.a[i] - 0.5*C*(g.a[i+1] - g.a[i-1]) + 0.5*C**2*(g.a[i+1] -2*g.a[i] + g.a[i-1])
            i += 1
        g.a[:] = anew[:]
        t += dt
    return g
#-----------------------------------------------------------------------------
# Driver: evolve a top-hat profile and plot it against the initial data,
# then run a grid-convergence study on a gaussian profile.
u = 1.0
nx = 64
C = 0.8
g = evolve(nx, C, u, 5, "tophat")
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], g.ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.savefig("fv-advect.png")
#-----------------------------------------------------------------------------
# convergence test
problem = "gaussian"
N = [32, 64, 128, 256]
u = 1.0
C = 0.8
err = []
for nx in N:
    g = evolve(nx, C, u, 5, problem)
    # compute the error against the initial condition
    err.append(g.norm(g.a - g.ainit))
    # Use the print() function form: the old "print g.dx, ..." statement is a
    # SyntaxError under Python 3 and was inconsistent with print("done!") below.
    print(g.dx, nx, err[-1])
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
pylab.scatter(N, err, color="r")
# reference line showing ideal second-order convergence
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="k")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.savefig("plm-converge.png")
print("done!")
| bt3gl/Numerical-Methods-for-Physics | homework4_advection_hyperbolic_PDEs/lax_wendroff/lax_method.py | Python | apache-2.0 | 5,600 | [
"Gaussian"
] | c645e7068c20460ea7c04eddb5150f348a0141f6f87a83f6adbeb21e86a48283 |
# -*- coding: utf-8 -*-
"""
===============================================================================
Atom classes for crystal structures (:mod:`sknano.core.atoms._basis_atoms`)
===============================================================================
.. currentmodule:: sknano.core.atoms._basis_atoms
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from ._lattice_atoms import LatticeAtom, LatticeAtoms
from ._periodic_atoms import PBCAtom, PBCAtoms
from ._xyz_atoms import XYZAtom, XYZAtoms
__all__ = ['BasisAtom', 'BasisAtoms']
class BasisAtom(PBCAtom, LatticeAtom, XYZAtom):
    """An abstract object representation of a crystal structure basis atom.

    Combines the periodic-boundary (`PBCAtom`), lattice (`LatticeAtom`) and
    Cartesian-coordinate (`XYZAtom`) mixins; no additional behavior is
    defined here.

    Parameters
    ----------
    lattice : :class:`~sknano.core.crystallography.Crystal3DLattice`
    xs, ys, zs : float
    """
    pass
class BasisAtoms(PBCAtoms, LatticeAtoms, XYZAtoms):
    """An `Atoms` sub-class for crystal structure basis atoms.

    Sub-class of `Atoms` class, and a container class for lists of
    :class:`~sknano.core.atoms.BasisAtom` instances.

    Parameters
    ----------
    atoms : {None, sequence, `BasisAtoms`}, optional
        if not `None`, then a list of `BasisAtom` instance objects or an
        existing `BasisAtoms` instance object.
    """
    @property
    def __atom_class__(self):
        # Concrete element class stored by this container; used by the base
        # `Atoms` machinery when creating/validating items.
        return BasisAtom
| scikit-nano/scikit-nano | sknano/core/atoms/_basis_atoms.py | Python | bsd-2-clause | 1,432 | [
"CRYSTAL"
] | 56707c2aa1fe05893ab72f409ca76a5178fdb922e36bafdfe549e2b1d678421d |
#! /usr/bin/env python
# Compute the volume-averaged on-site electrostatic potential around every
# atom of a chosen species in a VASP calculation and plot a histogram.
import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt
import csv
from itertools import izip  # NOTE(review): Python 2 only and unused below; fails on Python 3 (zip replaces it) -- verify before porting
potential_file = 'LOCPOT' # The file with VASP output for potential
coordinate_file = 'POSCAR' # The coordinates file NOTE NOTE This must be in vasp 4 format
species = "O" # The species whose on-site potential you are interested in
sample_cube = [5,5,5] # The size of the sampling cube in units of mesh points (NGX/Y/Z)
# Nothing below here should require changing
#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(potential_file)
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
# real-space grid spacing along each lattice vector
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
## Get the gradients (Field), if required.
## Comment out if not required, due to computational expense.
grad_x,grad_y,grad_z = np.gradient(grid_pot[:,:,:],resolution_x,resolution_y,resolution_z)
#------------------------------------------------------------------
##------------------------------------------------------------------
## Getting the potentials for a group of atoms, in this case the Os
## NOTE THIS REQUIRES ASE to be available https://wiki.fysik.dtu.dk/ase/index.html
##------------------------------------------------------------------
##------------------------------------------------------------------
import ase # Only add this if want to read in coordinates
from ase.io import write # Only add this if want to read in coordinates
from ase.io import vasp # Only add this if want to read in coordinates
coords = ase.io.vasp.read_vasp(coordinate_file)
scaled_coords = coords.get_scaled_positions()
symbols = coords.get_chemical_symbols()
# collect fractional coordinates of every atom of the target species
ox_coords = []
for i, atom in enumerate(coords):
    if symbols[i] == species:
        ox_coords.append(scaled_coords[i])
grid_position = np.zeros(shape=(3))
potentials_list = []
i = 0
num_bins = 20
for coord in ox_coords:
    i = i + 1
    grid_position[0] = coord[0]
    grid_position[1] = coord[1]
    grid_position[2] = coord[2]
    cube = sample_cube # The size of the cube x,y,z in units of grid resolution.
    # NOTE(review): origin subtracts whole grid points from fractional
    # coordinates -- looks like it expects the cube centred on the atom;
    # verify against md.volume_average's origin convention.
    origin = [grid_position[0]-2,grid_position[1]-2,grid_position[2]-1]
    volume_average, cube_var = md.volume_average(origin, cube, grid_pot, NGX, NGY, NGZ)
    potentials_list.append(volume_average)
# NOTE(review): `normed` is a deprecated matplotlib keyword (use `density`
# on modern matplotlib) -- confirm the installed matplotlib version.
n, bins, patches = plt.hist(potentials_list, num_bins,normed=100, facecolor='#6400E1', alpha=0.5)
plt.xlabel('Hartree potential (V)',fontsize = 22)
plt.ylabel('% of centres',fontsize = 22)
plt.savefig('Potentials.png',dpi=300)
plt.show()
| WMD-group/MacroDensity | examples/OnSitePotential.py | Python | mit | 2,829 | [
"ASE",
"VASP"
] | db6b6a68d156df98f53a27cc69b74dbf1b2b01bdf4e7c239fe70c23dad1bd69b |
#!/usr/bin/env python3
# This file is part of the LSLTools package.
# Copyright 2014
# Jari Torniainen <jari.torniainen@ttl.fi>,
# Andreas Henelius <andreas.henelius@ttl.fi>
# Finnish Institute of Occupational Health
#
# This code is released under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details
import sys
import random
import threading
import numpy
from . import pylsl_python3 as pylsl
import nitime.algorithms.autoregressive as ar
import scipy.signal
import uuid
import subprocess
class RandomData(threading.Thread):
    """ Generates a stream which inputs randomly generated values into the LSL

        Generates a multichannel stream which inputs random values into the LSL.
        Generated random values follow gaussian distribution where mean and
        std values can be specified as arguments.
    """
    def __init__(self, stream_name="RANDOM", stream_type="RND", nch=3,
                 srate=128, mean=0, std=1, fmt='float32', nsamp=0):
        """ Initializes a data generator

            Args:
                stream_name: <string> name of the stream in LSL (default="RANDOM")
                stream_type: <string> type of the stream in LSL (default="RND")
                nch: <integer> number of channels (default=3)
                srate: <integer> sampling rate (default=128)
                mean: <float> mean value for the random values (default=0)
                std: <float> standard deviation for the random values (default=1)
                fmt: <string> sample data format (default='float32')
                nsamp: <integer> number of samples in total (0=inf)
        """
        threading.Thread.__init__(self)
        # Stream stuff
        self.stream_name = stream_name
        self.stream_type = stream_type
        self.nch = nch
        self.srate = srate
        self.fmt = fmt
        # short unique id so multiple generators can coexist on the network
        self.uuid = str(uuid.uuid1())[0:4]
        # Synthetic data stuff, makes normally distributed noise for now
        self.data_mean = mean
        self.data_std = std
        # Setup timing related variables
        self.last_push = pylsl.local_clock()
        self.interval = 1.0 / float(self.srate)
        # nsamp == 0 is the sentinel for "stream forever"
        if nsamp == 0:
            self.nsamp = numpy.inf
        else:
            self.nsamp = nsamp
        self.running = True
        # Outlet
        self.outlet = pylsl.StreamOutlet(pylsl.StreamInfo(self.stream_name,
                                                          self.stream_type,
                                                          self.nch,
                                                          self.srate,
                                                          self.fmt,
                                                          self.uuid))
    def set_srate(self, srate):
        """ Changes the sampling rate of the stream.

            Args:
                srate: <integer> new sampling rate
        """
        self.srate = srate
        self.interval = 1.0 / float(self.srate)
    def set_mean(self, mean):
        """ Changes the mean of the random values.

            Args:
                mean: <float> new mean value for random samples
        """
        self.data_mean = mean
    def set_std(self, std):
        """ Changes the standard deviation of the random samples

            Args:
                std: <float> new standard deviation for random samples
        """
        self.data_std = std
    def push_sample(self):
        """ Pushes samples to LSL. """
        # one independent gaussian draw per channel
        new_sample = []
        for n in range(0, self.nch):
            new_sample.append(numpy.random.normal(self.data_mean,
                                                  self.data_std))
        self.outlet.push_sample(new_sample)
    def stop(self):
        """ Stops streaming. """
        self.running = False
    def run(self):
        """ Loops for a specified time or forever. """
        # NOTE(review): this polls local_clock() in a tight loop with no
        # sleep, pinning a CPU core -- confirm the timing requirement before
        # adding a short time.sleep().
        current_sample = 0
        while current_sample < self.nsamp and self.running:
            if pylsl.local_clock() - self.last_push >= self.interval:
                self.last_push = pylsl.local_clock()
                current_sample += 1
                self.push_sample()
class LinearData(threading.Thread):
    """ Generates a stream of linearly increasing values into the LSL.

        Every channel carries the same sawtooth ramp: the value increments
        by one per sample and wraps back to zero once it exceeds max_val.
    """
    def __init__(self, stream_name="LINEAR", stream_type="LIN", nch=3,
                 srate=128, max_val=1000, fmt='float32', nsamp=0):
        """ Initializes the linear data generator.

            Args:
                stream_name: <string> name of the stream in LSL (default="LINEAR")
                stream_type: <string> type of the stream in LSL (default="LIN")
                nch: <integer> number of channels (default=3)
                srate: <integer> sampling rate (default=128)
                max_val: <float> maximum value of the data (default=1000)
                fmt: <string> format of the samples (default='float32')
                nsamp: <integer> number of samples to stream (0=inf)
        """
        threading.Thread.__init__(self)
        # Stream stuff
        self.stream_name = stream_name
        self.stream_type = stream_type
        self.nch = nch
        self.srate = srate
        self.fmt = fmt
        # short unique id so multiple generators can coexist on the network
        self.uuid = str(uuid.uuid1())[0:4]
        self.value = 0
        self.MAX_VAL = max_val
        self.running = True
        # Setup update intervals and total number of samples to be streamed
        self.last_push = pylsl.local_clock()
        self.interval = 1.0 / float(self.srate)
        # nsamp == 0 is the sentinel for "stream forever"
        if nsamp == 0:
            self.nsamp = numpy.inf
        else:
            self.nsamp = nsamp
        # Create the LSL outlet
        self.outlet = pylsl.StreamOutlet(pylsl.StreamInfo(self.stream_name,
                                                          self.stream_type,
                                                          self.nch,
                                                          self.srate,
                                                          self.fmt,
                                                          self.uuid))
    def set_srate(self, srate):
        """ Changes the sampling rate of the stream.

            Args:
                srate: <integer> new sampling rate
        """
        self.srate = srate
        self.interval = 1.0 / float(self.srate)
    def set_max_val(self, max_val):
        """ Changes the maximum value of the linear data.

            Args:
                max_val: <float> new maximum value for linear data
        """
        self.MAX_VAL = max_val
    def push_sample(self):
        """ Pushes linearly increasing value into the outlet. """
        # same ramp value replicated on every channel
        new_sample = []
        for n in range(0, self.nch):
            new_sample.append(self.value)
        self.outlet.push_sample(new_sample)
    def stop(self):
        """ Stops pushing samples. """
        self.running = False
    def close_outlet(self):
        """ Close outlet (might cause an error). """
        # NOTE(review): calling __del__ directly is unusual -- verify that
        # pylsl.StreamOutlet has no explicit close()/destroy API.
        self.outlet.__del__()
    def run(self):
        """ Loop for pushing samples. Loops for a set amount or forever. """
        # NOTE(review): busy-wait polling loop -- see RandomData.run.
        current_sample = 0
        while current_sample < self.nsamp and self.running:
            if pylsl.local_clock() - self.last_push >= self.interval:
                self.last_push = pylsl.local_clock()
                current_sample += 1
                self.push_sample()
                self.value += 1
                if self.value > self.MAX_VAL:  # Reset value
                    self.value = 0
class MarkerData(threading.Thread):
    """ Creates a (string) marker LSL stream.

        Creates a stream which sends string formatted markers to the LSL
        at a constant rate. The stream can send just one type of marker or it
        can randomly select the marker from a list of markers.
    """
    def __init__(self, stream_name="MARKER", stream_type="MRK", srate=[1, 4],
                 markers=["ping", "pong"], nsamp=0):
        """ Initializes a marker generator

            Args:
                stream_name: <string> name of the stream in LSL (default='MARKER')
                stream_type: <string> type of the stream in LSL (default='MRK')
                srate: [int, int] sampling rate of the stream (default=[1,4])
                markers: <list> list of (string) markers (default=["ping","pong"])
                nsamp: number of samples to stream (0=inf)
        """
        threading.Thread.__init__(self)
        self.stream_name = stream_name
        self.stream_type = stream_type
        self.srate = srate
        self.markers = markers
        # short unique id so multiple generators can coexist on the network
        self.uuid = str(uuid.uuid1())[0:4]
        # the push interval is re-drawn after every sample from the
        # [min, max] rate range, giving jittered marker timing
        self.interval = 1.0 / float(random.randint(self.srate[0],
                                                   self.srate[1]))
        self.last_push = pylsl.local_clock()
        self.running = True
        # nsamp == 0 is the sentinel for "stream forever"
        if nsamp == 0:
            self.nsamp = numpy.inf
        else:
            self.nsamp = nsamp
        self.outlet = pylsl.StreamOutlet(pylsl.StreamInfo(self.stream_name,
                                                          self.stream_type,
                                                          1,
                                                          self.srate[0],
                                                          'string',
                                                          self.uuid))
    def set_srate(self, srate):
        """ Changes the sampling rate range of the stream.

            Args:
                srate: [int, int] new sampling rate range
        """
        self.srate = srate
        self.interval = 1.0 / float(random.randint(self.srate[0],
                                                   self.srate[1]))
        self.last_push = pylsl.local_clock()
    def set_markers(self, markers, append=False):
        """ Changes the markers or adds new ones.

            Args:
                markers: <list> list of markers
                append: <Boolean> True to append new markers, False to replace
        """
        if append:
            # BUGFIX: previously "self.markers + markers" discarded the
            # concatenation result, making append mode a silent no-op;
            # store the combined list instead.
            self.markers = self.markers + markers
        else:
            self.markers = markers
    def push_sample(self):
        """ Pushes a randomly selected marker to the outlet. """
        self.outlet.push_sample([random.choice(self.markers)])
    def stop(self):
        """ Stops looping. """
        self.running = False
    def run(self):
        """ Main loop for pushing samples. """
        # NOTE(review): busy-wait polling loop -- see RandomData.run.
        current_sample = 0
        while current_sample < self.nsamp and self.running:
            if pylsl.local_clock() - self.last_push >= self.interval:
                self.last_push = pylsl.local_clock()
                self.interval = 1.0/float(random.randint(self.srate[0],
                                                         self.srate[1]))
                current_sample += 1
                self.push_sample()
class ECGData(threading.Thread):
    """ Generates and feeds synthetic ECG in to LSL.
    Uses the C implementation ECGSYN to generate simulated ECG
    (http://www.physionet.org/physiotools/ecgsyn/). Needs a path to
    ECGSYN as the first argument.

    Data is produced in blocks by invoking the external `ecgsyn` binary,
    parsed from its dump file, and streamed sample-by-sample from `run()`.
    A threading.Lock guards `self.data`, which is shared between the
    streaming loop and the refill threads spawned by `run()`/`reset()`.
    """
    def __init__(self, ecgsynpath, O="/tmp/dump.dat", n=256, s=256, S=256, a=0,
                 h=60, H=1, f=0.1, F=0.25, v=0.01, V=0.01, q=0.5, R=1,
                 stream_name="ECGSYN"):
        """ Initializes the synthetic ECG data generator.
        Args:
            ecgsynpath: <string> path to ecgsyn
            O: <string> path to temporary file (default "/tmp/dump.dat")
            n: <integer> total number of heart beats per block (default=256)
            s: <integer> ECG sampling rate (default=256)
            S: <integer> internal sampling rate (default=256)
            a: <float> amplitude of additive uniform noise (default=0)
            h: <float> heart rate mean (default=60)
            H: <float> heart rate std (default=1)
            f: <float> low frequency (default=0.1)
            F: <float> high frequency (default=0.25)
            v: <float> low frequency standard deviation (default=0.01)
            V: <float> high frequency standard deviation (default=0.01)
            q: <float> LF/HF ratio (default=0.5)
            R: <float> seed (default=1)
            stream_name: <string> stream name
        """
        threading.Thread.__init__(self)
        self.path = ecgsynpath
        self.uuid = str(uuid.uuid1())[0:4]
        self.srate = s
        # Refill threshold: request more data once fewer than 10 seconds of
        # samples remain buffered.
        self.buffer_warning = 10 * self.srate # this might break
        # Timing
        self.interval = 1.0 / float(self.srate)
        self.last_push = pylsl.local_clock()
        # Same parameters as ecgsyn_c, read their documentation for more
        # detailed information. They are stored as strings because they are
        # passed straight to subprocess.call as command-line arguments.
        self.O = str(O) # Datadumping file
        self.n = str(n) # Number of heartbeats
        self.s = str(s) # ECG sampling rate
        self.S = str(S) # Internal sampling rate
        self.a = str(a) # Amplitude of additive uniform noise
        self.h = str(h) # Heart rate mean
        self.H = str(H) # Heart rate std
        self.f = str(f) # Low frequency
        self.F = str(F) # High frequency
        self.v = str(v) # Low frequency standard deviation
        self.V = str(V) # High frequency standard deviation
        self.q = str(q) # LF/HF ratio
        self.R = str(R) # Seed
        # Setup LSL
        info = pylsl.StreamInfo(stream_name, 'ECG', 1, self.srate,
                                'float32', self.uuid)
        self.outlet = pylsl.StreamOutlet(info)
        # Need a thread lock
        self.lock = threading.Lock()
        # Generate some of this to get this going
        self.data = numpy.empty(0)
        self.generate_data(False)
        self.running = True
    def generate_data(self, append_flag):
        """ Generates more data and appends (maybe) it to buffer.
        Args:
            append_flag: <Boolean> does new data append to or replace current
        """
        # generate more data to a tmpfile by calling ecgsyn_c with subprocess
        # (blocks until the external binary has finished writing the block)
        subprocess.call([self.path, "-O", self.O, "-n", self.n,
                         "-s", self.s, "-S", self.S, "-a", self.a,
                         "-h", self.h, "-H", self.H, "-f", self.f,
                         "-F", self.F, "-v", self.v, "-V", self.V,
                         "-q", self.q, "-R", self.R])
        # read data from tmpfile
        d = numpy.genfromtxt(self.O, delimiter=" ")
        # and add it to the buffer (lock: run() consumes self.data concurrently)
        self.lock.acquire()
        if append_flag:
            self.data = numpy.append(self.data, d, axis=0)
        else:
            self.data = d
        self.lock.release()
    def set_n(self, n):
        """ Changes the number of heart beats generated per block.
        Args:
            n: <integer> number of heart beats per block of data
        """
        self.n = str(n)
    def set_s(self, s):
        """ Changes the ECG sampling rate.
        Args:
            s: <integer> new sampling rate
        """
        self.s = str(s)
        self.interval = 1.0 / float(s)
    def set_S(self, S):
        """ Changes the internal sampling rate.
        Args:
            S: <integer> new internal sampling rate
        """
        self.S = str(S)
    def set_a(self, a):
        """ Changes the amplitude of the additive (uniform) noise.
        Args:
            a: <float> new amplitude of the additive noise
        """
        self.a = str(a)
    def set_h(self, h):
        """ Changes the mean heart rate.
        Args:
            h: <float> new mean heart rate
        """
        self.h = str(h)
    def set_H(self, H):
        """ Changes the standard deviation of the heart rate.
        Args:
            H: <float> new heart rate standard deviation
        """
        self.H = str(H)
    def set_f(self, f):
        """ Changes low frequency.
        Args:
            f: <float> new low frequency
        """
        self.f = str(f)
    def set_F(self, F):
        """ Changes high frequency.
        Args:
            F: <float> new high frequency
        """
        self.F = str(F)
    def set_v(self, v):
        """ Changes low frequency standard deviation.
        Args:
            v: <float> new low frequency standard deviation
        """
        self.v = str(v)
    def set_V(self, V):
        """ Changes high frequency standard deviation.
        Args:
            V: <float> new high frequency standard deviation
        """
        self.V = str(V)
    def set_q(self, q):
        """ Changes LF/HF ratio
        Args:
            q: <float> new LF/HF ratio
        """
        self.q = str(q)
    def set_r(self, R):
        """ Changes seed.
        Args:
            R: <float> new seed
        """
        self.R = str(R)
    def reset(self):
        """ Resets data streaming. Replaces current buffer with new data. """
        # NOTE(review): the thread is joined immediately, so this is
        # effectively a synchronous regeneration -- presumably intentional.
        r = threading.Thread(target=self.generate_data, args=[False])
        r.start()
        r.join()
    def stop(self):
        """ Stops streaming. """
        self.running = False
    def run(self):
        """ Streaming loops. Pushes samples and requests data when needed. """
        while self.running:
            # push sample
            if pylsl.local_clock() - self.last_push >= self.interval:
                self.lock.acquire()
                self.last_push = pylsl.local_clock()
                # Column 1 of the ecgsyn dump is the ECG voltage; the
                # consumed row is then removed from the front of the buffer.
                # NOTE(review): assumes self.data is never empty here --
                # an underrun would raise an IndexError. Confirm ecgsyn
                # refills always outpace consumption.
                self.outlet.push_sample([float(self.data[0, 1])])
                self.data = numpy.delete(self.data, (0), axis=0)
                self.lock.release()
            # get more data
            if self.data.shape[0] < self.buffer_warning:
                # join() blocks the streaming loop until the refill finishes,
                # so generation is effectively synchronous here too.
                t = threading.Thread(target=self.generate_data, args=[True])
                t.start()
                t.join()
class EEGData(threading.Thread):
    """ Generates and feeds synthetic EEG into LSL.
    Generates random EEG by driving white noise through an all-pole IIR
    filter. The filter coefficients are defined by autoregression estimate
    of the EEG signal. AR coefficients can be estimated from offline EEG
    data or coefficients can be directly passed to the object.
    """
    def __init__(self, data=False, ar_coefs=False, p=20, p2p=80, nch=1,
                 srate=512, stream_name='EEGSYN'):
        """ Initialize the EEG-streamer object.
        Args:
            data: <array_like> EEG data used for estimating AR coefficients
            ar_coefs: <array_like> AR coefficients
            p: <integer> model order for the AR estimation (default p=20)
            p2p: <float> peak-to-peak amplitude of simulated EEG (default=80)
            nch: <integer> number of channels
            srate: <float> sampling rate of the data
            stream_name: <string> name of the stream in LSL (default='EEGSYN')
        """
        threading.Thread.__init__(self)
        # Default amplitude; the data-driven branch below overrides it.
        # (Bug fix: this assignment used to come *after* the branches and
        # unconditionally clobbered the std(data)-derived amplitude.)
        self.p2p = p2p  # peak-to-peak amplitude
        # Bug fix: the former `if data:` / `elif ar_coefs:` tests raise
        # ValueError for numpy arrays (ambiguous truth value). Use explicit
        # sentinel checks plus a size guard instead.
        if data is not False and data is not None and numpy.size(data) > 0:
            # Calculate AR model from input data
            data = data - numpy.mean(data)
            # NOTE(review): assumes ar.AR_est_YW returns the AR coefficient
            # array (without the leading 1) -- some implementations return a
            # (coefs, sigma) tuple; confirm against the installed `ar` module.
            self.A = ar.AR_est_YW(data, p)
            self.A *= -1
            self.A = numpy.insert(self.A, 0, 1)
            self.srate = srate
            self.p2p = numpy.std(data)
        elif ar_coefs is not False and ar_coefs is not None \
                and numpy.size(ar_coefs) > 0:
            # Use the provided AR coefficients
            self.A = ar_coefs
            self.srate = srate
        else:
            # Use default AR coefficients
            self.A = numpy.array((1, -2.45, 1.7625, 0.0116, -0.3228))
            self.srate = 512
        self.nch = nch
        self.osc = dict()
        self.interval = 1.0/float(self.srate)
        self.last_push = pylsl.local_clock()
        self.lock = threading.Lock()
        # Keep ten seconds buffered; refill when less than half remains.
        self.buffer_size = self.srate*10
        self.buffer_warning = self.buffer_size/2
        self.uuid = str(uuid.uuid1())[0:4]
        self.running = True
        # Setup LSL
        info = pylsl.StreamInfo(stream_name, 'EEG',
                                self.nch, self.srate, 'float32', self.uuid)
        self.outlet = pylsl.StreamOutlet(info)
        self.data = numpy.empty(0)
        self.generate_data(False)
    def generate_data(self, append_flag):
        """ Add new data to the streaming buffer.
        Args:
            append_flag: <Boolean> does new data append to or replace current
        """
        # All-pole IIR filter driven by white noise, scaled to the target
        # peak-to-peak amplitude.
        noise = numpy.random.normal(0, 1, (self.buffer_size, self.nch))
        new_data = scipy.signal.lfilter((1, 0), self.A, noise, axis=0)
        new_data *= self.p2p / numpy.std(new_data)
        # Add oscillations (if there are any), each scaled relative to the
        # broadband signal's maximum by its configured amplitude.
        if self.osc:
            for o in self.osc:
                s = self.generate_oscillation(self.osc[o])
                s *= ((self.osc[o][-1] * numpy.max(abs(new_data))) /
                      numpy.max(abs(s)))
                new_data += s
        self.lock.acquire()
        if append_flag:
            self.data = numpy.append(self.data, new_data, axis=0)
        else:
            self.data = new_data
        self.lock.release()
    def generate_oscillation(self, osc):
        """ Generate a band-limited oscillation for the signal.
        Args:
            osc: <list> [f_low, f_high, f_bound, ampl] oscillation parameters
        Returns:
            numpy array of shape (buffer_size, nch) of band-pass noise
        """
        f_low, f_high, f_bound, coef = osc
        nyq = float(self.srate) / 2.0
        # Normalize band edges exactly once.
        # (Bug fix: the stop-band edges were previously computed from the
        # already-normalized pass-band edges and then divided by Nyquist a
        # second time, producing invalid -- typically negative -- stop-band
        # frequencies.)
        wp = [float(f_low) / nyq, float(f_high) / nyq]              # Passband
        ws = [float(f_low - f_bound) / nyq,
              float(f_high + f_bound) / nyq]                        # Stop band
        N, wn = scipy.signal.cheb1ord(wp, ws, 0.1, 30)
        b, a = scipy.signal.cheby1(N, 0.5, wn, btype='bandpass', output='ba')
        s = numpy.random.normal(0, 1, (self.buffer_size, self.nch))
        s = scipy.signal.lfilter(b, a, s, axis=0) * coef
        return s
    def add_oscillation(self, name, f_low, f_high, f_bound, ampl):
        """ Adds a new oscillation (or replaces one with the same name).
        Args:
            name: <string> name of the stream
            f_low: <float> lower boundary of the frequency band (in Hz)
            f_high: <float> upper boundary of the frequency band (in Hz)
            f_bound: <float> length of the transition band (in Hz)
            ampl: <float> relative amplitude of the oscillation
        """
        # should sanitize inputs here
        self.osc[name] = [f_low, f_high, f_bound, ampl]
    def remove_oscillation(self, name):
        """ Removes an oscillation from the list.
        Args:
            name: <string> name of the oscillation to be removed
        """
        if name in self.osc:
            del self.osc[name]
        else:
            print("Invalid oscillation name!")
    def list_oscillations(self):
        """ Lists all the current oscillations. """
        print("Oscillation list:")
        for osc in self.osc.keys():
            print(osc)
    def set_p2p(self, p2p):
        """ Change peak-to-peak amplitude of the EEG signal.
        Args:
            p2p: <float> new peak-to-peak amplitude
        """
        self.p2p = p2p
    def set_AR(self, AR, srate=0):
        """ Change autoregression coefficients of the signal.
        Args:
            AR: <array_like> array of new AR coefficients
            srate: <float> sampling rate corresponding to the new AR model
        """
        self.A = AR
        if srate:
            self.srate = srate
    def reset(self):
        """ Reset data streaming. Replaces current buffer with new data. """
        r = threading.Thread(target=self.generate_data, args=[False])
        r.start()
        r.join()
    def stop(self):
        """ Stops streaming. """
        self.running = False
    def run(self):
        """ Streaming loop. Pushes samples and generates new data as needed. """
        while self.running:
            # check if we need to push a new sample
            if pylsl.local_clock() - self.last_push >= self.interval:
                self.lock.acquire()
                self.last_push = pylsl.local_clock()
                self.outlet.push_sample(list(self.data[0, :]))
                self.data = numpy.delete(self.data, 0, axis=0)
                self.lock.release()
            # check if we need to generate more data
            if self.data.shape[0] < self.buffer_warning:
                t = threading.Thread(target=self.generate_data, args=[True])
                t.start()
                t.join()
if __name__ == '__main__':
    """ Give arguments, receive streams. Parameters not supported, will run with
    defaults.
    Args:
        'Random': Starts a random stream
        'Linear': Starts a linear stream
        'Marker': Starts a marker stream
        'ECG': Starts an ECG stream
        'EEG': Starts an EEG stream
    """
    cli_args = sys.argv[1:]
    if not cli_args:
        print("Provide at least 1 argument:",
              "RANDOM,LINEAR,MARKER,ECG OR EEG!")
    else:
        # Instantiate one streamer per recognized (case-insensitive) argument.
        streamers = []
        for raw_arg in cli_args:
            kind = raw_arg.lower()
            if kind == "random":
                streamers.append(RandomData())
            elif kind == "linear":
                streamers.append(LinearData())
            elif kind == "marker":
                streamers.append(MarkerData())
            elif kind == "ecg":
                # ECGSYN needs the path to its external binary.
                path_to_ecgsyn = input("input path to ecgsyn: ")
                streamers.append(ECGData(path_to_ecgsyn))
            elif kind == "eeg":
                streamers.append(EEGData(nch=4))
            else:
                print("unknown data type!")
        if streamers:
            print("Starting stream(s)")
            for streamer in streamers:
                streamer.start()
            # Block until the user asks to quit.
            while input('enter q to quit ') != 'q':
                pass
            print("Shutting down...")
            for streamer in streamers:
                streamer.stop()
            print("Finished.")
| bwrc/lsltools | lsltools/sim.py | Python | mit | 25,614 | [
"Gaussian"
] | 1df286255c80c54cf972157326c8950929a2ba14a82c0e1287fcad03c850b843 |
"""
fitness v0.01
- implements a fitness function base class
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import time
from operator import itemgetter
from math import exp
import sys
from logs import *
from cache import *
import logging
logging.basicConfig(level=logging.WARNING)
#logger = logging.getLogger(__name__)
logger = logging
class Fitness:
    """Base class for ga-bitbot fitness functions (Python 2).

    Loads and caches CSV market data, optionally classifies it into
    quartiles, and exposes the hooks (`classify`, `score`, `get_target`,
    `input`, `cache_output`) that concrete strategy evaluators override.
    """
    def __init__(self):
        self.cache = cache()
        self.cache_input = True        # cache parsed input data / classification
        self.cache_results = False
        self.cache_chart = False
        self.logs = logs()
        self.input_file_name = ""      # CSV file to load in load_input_data()
        self.text_summary = "" #text summary of the results
        self.input_data = []           # rows of [timestamp, price, volume]
        self.classification = []
        self.input_data_length = 0
        self.current_quartile = 0
        self.period = 0                # incremented once per input() call
        self.positions = []
        self.score_only = False #set to true to only calculate what is required for scoring a strategy
        #to speed up performance.
        self.max_length = 10000000     # cap on number of input rows kept
        self.enable_flash_crash_protection = False
        self.flash_crash_protection_delay = False
        self.reset()
        return
    def reset(self):
        """Clear per-run state (logs, score, period counter)."""
        self.logs.reset()
        self.score_balance = 0
        self.period = 0
        return
    def load_input_data(self):
        #base class provides a csv format input loader
        #format of "N,value" , where N can be a index or timestamp
        # Rows are parsed as "time,price,volume"; the first line (header) is
        # skipped. Results are cached for 10 minutes when cache_input is set.
        logger.info("loading data (csv format)")
        self.input_data = None
        if self.cache_input == True:
            cache_label = self.input_file_name +'::'+str(self.max_length)
            self.input_data = self.cache.get(cache_label)
        if self.input_data == None:
            f = open(self.input_file_name,'r')
            d = f.readlines()
            f.close()
            # Keep only the most recent max_length rows.
            if len(d) > self.max_length:
                d = d[self.max_length * -1:]
            self.input_data = []
            for row in d[1:]:
                v = row.split(',')[2] #volume
                r = row.split(',')[1] #price
                t = row.split(',')[0] #time
                self.input_data.append([int(float(t)),float(r),float(v)])
            logger.info("input data loaded from file-----")
            if self.cache_input == True:
                self.cache.set(cache_label,self.input_data)
                self.cache.expire(cache_label,60*10)
        else:
            logger.info("cached data found: " + cache_label)
        self.input_data_length = len(self.input_data)
        logger.info("loaded data len:" + str(self.input_data_length))
        return
    def initialize(self):
        """Load the input data and classify it (using the cache if enabled)."""
        #
        # !!! WARNING !!!
        #
        # If the gene_def config can influence the classification make sure the cache label has a unique name!!!
        # NOTE(review): hash(self) is identity-based, so this label is not
        # stable across processes/instances -- confirm this is intended.
        self.classification_cache_label = str(hash(self)) + '::'+__name__+'_classification::'+str(self.max_length)
        logger.info("initializing")
        self.load_input_data()
        if self.cache_input == False:
            self.classify(self.input_data)
        else:
            cm = self.cache.get(self.classification_cache_label)
            if cm == None:
                logger.info("classifying input data")
                self.classify(self.input_data)
                if self.cache_input == True:
                    self.cache.set(self.classification_cache_label,self.classification)
                    self.cache.expire(self.classification_cache_label,60*15)
            else:
                logger.info("cached classification data found: " + self.classification_cache_label)
                # NOTE(review): the cache-hit path sets market_class /
                # classified_market_data while classify() sets classification /
                # classified_data -- the attribute names look inconsistent;
                # confirm against subclasses.
                self.market_class = cm
                self.classified_market_data = True
        return self.current_quartile
    def run(self):
        """Feed every loaded input record through the input() hook."""
        logger.info("run")
        #this function simply pushes the loaded data through the input function
        for i in self.input_data:
            self.input(i)
        return
    def test_quartile(self,quartile):
        #valid inputs are 1-4
        #tells the fitness function which quartile to test
        #again the definition of what a quartile represents is left to the developer
        #can be used to maintain four independant populations using the exact same fitness evaluation
        #if classification is not used and the set quartile is ignored.
        self.quartile = quartile
    def classify(self,input_list):
        #returns self.current_quartile which represents one of four possible states with a value of 0-3.
        #classification preprocessor can split the input data into four quartiles
        #every input must be binned to a quartile and added to the classification list
        # Base-class implementation is a no-op; subclasses override.
        self.classification = []
        self.classified_data = True
        self.current_quartile = 0
        return self.current_quartile
    def score(self):
        #scoring function
        # Base class just reports the accumulated balance.
        return self.score_balance
    def get_target(self):
        #used to return a target
        target = 0
        return target
    def input(self,input_record):
        """Consume one input record; subclasses implement the strategy here."""
        self.period += 1 #increment the period counter
        #example input logging:
        #if not self.score_only:
        #       self.time = int(time_stamp * 1000)
        #       self.logs.append('price',[self.time,record])
        return
    def compress_log(self,log,lossless_compression = False, lossy_max_length = 2000):
        #utility function to provide data compression
        #lossless compression removes records with no change in value, before and after record n
        #lossy compression selects the sample which deviates most from a moving average
        #returns a compressed log
        #compresses on index 1 of a list (ex. [time_stamp,value] )
        # Pass 1 (lossless): repeatedly drop interior records whose value
        # equals both neighbors, until a fixed point is reached. Values are
        # also rounded to 3 decimal places in place.
        compressible = True
        while compressible:
            compressible = False
            ret_log = []
            for i in xrange(len(log)):
                if type(log[i][1]) == float:
                    log[i][1] = float("%.3f"%log[i][1])
                if i >= 1 and i < len(log) - 1:
                    if log[i-1][1] == log[i][1] and log[i+1][1] == log[i][1]:
                        compressible = True #no change in value before or after, omit record
                    else:
                        ret_log.append(log[i])
                else:
                    ret_log.append(log[i])
            log = ret_log
        if lossless_compression == True:
            return ret_log
        # Pass 2 (lossy): repeatedly halve the log, keeping from each pair
        # the sample deviating most from an exponential moving average,
        # until it fits within lossy_max_length.
        while len(log) > lossy_max_length:
            avg = log[0][1]
            avg = (log[0][1] - avg) * 0.2 + avg
            ret_log = [log[0]] #capture the first record
            for i in xrange(1,len(log),2):
                #find which sample that deviates the most from the average
                a = abs(log[i][1] - avg)
                b = abs(log[i-1][1] - avg)
                if a > b:
                    ret_log.append(log[i])
                else:
                    ret_log.append(log[i-1])
                #update the moving average
                avg = (log[i-1][1] - avg) * 0.2 + avg
                avg = (log[i][1] - avg) * 0.2 + avg
            ret_log.append(log[len(log)-1]) #make sure the last record is captured
            log = ret_log
        return ret_log
    def cache_output(self,cache_name,periods=80000):
        #it's up to the developer what output data to cache
        #example implementation:
        #
        #p = self.logs.get('price')
        #if len(p) > periods:
        #       self.logs.prune_logs(p[-1*periods][0])
        #self.logs.compress_logs(exclude_keys=['buy','sell','stop','trigger'],lossless_compression = False, max_lossy_length = 10000)
        #self.cache.set(cache_name,self.logs.json())
        return
def test():
    """Exercise the Fitness base class end-to-end on the sample data feed."""
    engine = Fitness()
    engine.input_file_name = "./datafeed/bcfeed_mtgoxUSD_1min.csv"
    engine.initialize()
    engine.test_quartile(1)
    engine.run()
    return engine
if __name__ == "__main__":
import pdb
import hotshot,hotshot.stats
print "fitness base class profile "
print " -- this is a test script to profile the performance of the fitness funciton base class"
print "Profiling...(This is going to take a while)"
class trade_engine(Fitness):
def __init__(self):
Fitness.__init__(self)
prof = hotshot.Profile("fitness.prof")
te = prof.runcall(test)
prof.close()
stats = hotshot.stats.load("fitness.prof")
stats.strip_dirs()
stats.sort_stats('time','calls')
stats.print_stats(20)
te = trade_engine()
print "Score:",te.score()
print "Done."
| OndroNR/ga-bitbot | libs/fitness.py | Python | gpl-3.0 | 9,169 | [
"Brian"
] | e728219028f96d817ced77a947b3309348dd938efbf15def37b032da4653ee61 |
from .fgir import *
from .optimize import FlowgraphOptimization
from .error import Warn
import asyncio
class PCodeOp(object):
    '''A class interface for creating coroutines.
    This helps us keep track of valid computational elements. Every coroutine
    in a PCode object should be a method of PCodeOp.'''
    @staticmethod
    async def _node(in_qs, out_qs, func):
        '''A helper coroutine shared by all node types.
        `in_qs`: an ordered list of asyncio.Queues which hold the node's inputs.
        `out_qs`: a list of asyncio.Queues into which the output should go.
        `func`: the function applied to the inputs to produce the output.
        The same return value is put into every output queue.'''
        # Await one value from every input queue; gather preserves argument
        # order, so inputs line up positionally with func's parameters.
        inputs = await asyncio.gather(*(q.get() for q in in_qs))
        retval = func(*inputs)
        # Broadcast the single result to all consumers.
        await asyncio.gather(*(q.put(retval) for q in out_qs))
    @staticmethod
    async def forward(in_qs, out_qs):
        '''Identity node: passes its single input through unchanged.'''
        def f(value):
            return value
        await PCodeOp._node(in_qs, out_qs, f)
    @staticmethod
    async def libraryfunction(in_qs, out_qs, function_ref):
        '''Applies a library function; the first input is bound as its target.'''
        def f(*inputs):
            return function_ref.__get__(inputs[0])(*inputs[1:])
        await PCodeOp._node(in_qs, out_qs, f)
    @staticmethod
    async def librarymethod(in_qs, out_qs, method_ref):
        '''Applies a library method; the first input is the receiver (self).'''
        def f(*inputs):
            return method_ref.__get__(inputs[0])(*inputs[1:])
        await PCodeOp._node(in_qs, out_qs, f)
    @staticmethod
    async def input(in_qs, out_qs):
        '''Component-input node: forwards the externally supplied value.'''
        def f(value):
            return value
        await PCodeOp._node(in_qs, out_qs, f)
    @staticmethod
    async def literal(out_qs, value_ref):
        '''Constant node: emits `value_ref` to every output queue.
        Bug fix: the body previously referenced an undefined name `in_qs`,
        raising NameError whenever a flowgraph contained a literal node;
        a literal has no inputs, so an empty input list is passed.'''
        def f(*inputs):
            return value_ref
        await PCodeOp._node([], out_qs, f)
class PCode(object):
    """An executable dataflow program: node coroutines wired up by queues.

    Built by PCodeGenerator from a flowgraph; run() feeds the component
    inputs, awaits every node coroutine, and collects the outputs.
    """
    def __init__(self):
        self.inputs = [] # ordered
        self.outputs = [] # ordered
        self.ops = [] # unordered
        self.retvals = None   # last collected output list, kept for inspection
    def add_op(self, pcode_op_coroutine):
        # Register a PCodeOp coroutine to be awaited by the driver.
        self.ops.append( pcode_op_coroutine )
    async def input_generator(self,input_args):
        # Put each positional argument into the matching component input queue.
        gen_coroutines = [q.put(i) for q,i in zip(self.inputs, input_args)]
        await asyncio.gather(*gen_coroutines)
    async def output_collector(self, future):
        # Await one value from every output queue, in declared order.
        col_coroutines = [q.get() for q in self.outputs]
        output_args = await asyncio.gather(*col_coroutines)
        self.retvals = output_args
        return output_args
    async def driver(self, input_args, future):
        # Run the input feeder, the output collector, and all node coroutines
        # concurrently; gather returns results in argument order, so the
        # collector's output list is the second element.
        _,value,*_ = await asyncio.gather(self.input_generator(input_args), self.output_collector(future), *self.ops)
        future.set_result(value)
    def run(self, *input_args):
        """Execute the component synchronously; returns its first output."""
        return_future = asyncio.Future()
        asyncio.ensure_future(self.driver(input_args, return_future))
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        loop.run_until_complete(return_future)
        # NOTE(review): only the first output is returned even when the
        # component declares several -- confirm callers expect this.
        return return_future.result()[0]
class PCodeGenerator(FlowgraphOptimization):
    """Flowgraph pass that lowers each flowgraph into an executable PCode.

    After visiting, self.pcodes maps flowgraph names to PCode objects and
    self.queues holds the edge queues of the most recently visited graph.
    """
    def __init__(self):
        self.pcodes = {}
    def visit(self, flowgraph):
        pc = PCode()
        # Create asyncio queues for every edge
        # qs is indexed by tuples of the source and destination node ids
        # for the inputs of a component, the source should be None
        qs = {} # { (src,dst)=>asyncio.Queue(), ... }
        # Populate qs by iterating over inputs of every node
        for (nodeid, node) in flowgraph.nodes.items():
            for src in node.inputs:
                qs[src, nodeid] = asyncio.Queue()
        # Add an extra input queue for each component input
        component_inputs = []
        for dst in flowgraph.inputs:
            q = asyncio.Queue()
            component_inputs.append(q)
            qs[(None,dst)] = q
            # Tag the queue with its endpoints (debugging aid).
            qs[(None,dst)]._endpoints = (None,dst)
        pc.inputs = component_inputs
        # Now create all the coroutines from the nodes.
        for (node_id,node) in flowgraph.nodes.items():
            node_in_qs = [qs[src_id,node_id] for src_id in node.inputs]
            # A node's consumers are every node that lists it as an input;
            # one output queue feeds each consumer.
            out_ids = [i for (i,n) in flowgraph.nodes.items() if node_id in n.inputs]
            node_out_qs = [qs[node_id,dst_id] for dst_id in out_ids]
            if node.type==FGNodeType.forward:
                pc.add_op( PCodeOp.forward(node_in_qs, node_out_qs) )
            elif node.type==FGNodeType.libraryfunction:
                pc.add_op( PCodeOp.libraryfunction(node_in_qs, node_out_qs, node.ref) )
            elif node.type==FGNodeType.librarymethod:
                pc.add_op( PCodeOp.librarymethod(node_in_qs, node_out_qs, node.ref) )
            elif node.type==FGNodeType.input:
                # Add an extra input queue for each component input
                node_in_q = qs[(None,node_id)]
                pc.add_op( PCodeOp.input([node_in_q], node_out_qs) )
            elif node.type==FGNodeType.output:
                # Remove the output node and just use its input queues directly.
                pc.outputs = node_in_qs
            elif node.type==FGNodeType.literal:
                pc.add_op( PCodeOp.literal(node_out_qs, node.ref) )
        self.pcodes[flowgraph.name] = pc
        self.queues = qs
"VisIt"
] | 02abecc112e32357192d692dbc4d121b81d1886a4438e0eccfa0468f4f8f13d9 |
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
#import gnumpy as gnp
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.label_modifier import HTSLabelModification
#from frontend.mlpg_fast import MLParameterGenerationFast
#from frontend.mlpg_fast_layer import MLParameterGenerationFastLayer
import configuration
from models.deep_rnn import DeepRecurrentNetwork
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
def extract_file_id_list(file_list):
    """Return the bare file id (no directory, no extension) for each path."""
    return [os.path.basename(os.path.splitext(path)[0]) for path in file_list]
def read_file_list(file_name):
    """Read a text file into a list of non-empty, whitespace-stripped lines."""
    logger = logging.getLogger("read_file_list")
    with open(file_name) as fid:
        file_lists = [entry.strip() for entry in fid.readlines() if entry.strip()]
    logger.debug('Read file list from %s' % file_name)
    return file_lists
def make_output_file_list(out_dir, in_file_lists):
    """Map each input path to a path with the same filename under out_dir."""
    return [out_dir + '/' + os.path.basename(path) for path in in_file_lists]
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
    """Build '<file_dir>/<id><ext>' for every id, creating the directory if
    it is missing and new_dir_switch is set."""
    if new_dir_switch and not os.path.exists(file_dir):
        os.makedirs(file_dir)
    return [file_dir + '/' + file_id + file_extension for file_id in file_id_list]
def visualize_dnn(dnn):
    """Plot the weight matrix of every layer of a trained network.

    Args:
        dnn: trained network whose `params` list holds theano shared
             weight/bias variables, including input and output layers.
    """
    layer_num = len(dnn.params) ## including input and output
    plotlogger = logging.getLogger("plotting")
    for i in range(layer_num):
        fig_name = 'Activation weights W' + str(i) + '_' + dnn.params[i].name
        fig_title = 'Activation weights of W' + str(i)
        xlabel = 'Neuron index of hidden layer ' + str(i)
        ylabel = 'Neuron index of hidden layer ' + str(i+1)
        if i == 0:
            xlabel = 'Input feature index'
        if i == layer_num-1:
            ylabel = 'Output feature index'
        aa = dnn.params[i].get_value(borrow=True).T
        print(aa.shape, aa.size)
        # Only plot genuine matrices; for a 1-D bias vector size == shape[0].
        if aa.size > aa.shape[0]:
            # NOTE(review): `logger` is not defined in this function; this
            # presumably relies on a module-level LoggerPlotter instance
            # (or was meant to be `plotlogger.create_plot`) -- confirm.
            logger.create_plot(fig_name, SingleWeightMatrixPlot)
            plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i].get_value(borrow=True).T)
            plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def load_covariance(var_file_dict, out_dimension_dict):
    """Load the per-stream variance vectors from binary files.

    Args:
        var_file_dict: maps stream name -> path of its binary variance file
        out_dimension_dict: maps stream name -> output dimensionality
    Returns:
        dict mapping stream name -> (dim, 1) numpy column vector
    """
    io_funcs = BinaryIOCollection()
    var = {}
    for stream_name in var_file_dict:
        values, _ = io_funcs.load_binary_file_frame(var_file_dict[stream_name], 1)
        var[stream_name] = numpy.reshape(values, (out_dimension_dict[stream_name], 1))
    return var
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None,
cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None, seq_dur_file_list = None):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layer_size = hyper_params['hidden_layer_size']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
model_type = hyper_params['model_type']
hidden_layer_type = hyper_params['hidden_layer_type']
## use a switch to turn on pretraining
## pretraining may not help too much, if this case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
sequential_training = hyper_params['sequential_training']
dropout_rate = hyper_params['dropout_rate']
# sequential_training = True
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
if cfg.network_type != 'S2S':
seq_dur_file_list = None
if not seq_dur_file_list:
train_dur_file_list = None
valid_dur_file_list = None
else:
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, subphone_feats="coarse_coding")
train_dur_file_list = seq_dur_file_list[0:cfg.train_file_number]
valid_dur_file_list = seq_dur_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
logger.debug('Creating training data provider')
train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list, dur_file_list = train_dur_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, network_type=cfg.network_type, shuffle = True)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, dur_file_list = valid_dur_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, network_type=cfg.network_type, shuffle = False)
if cfg.network_type == 'S2S':
shared_train_set_xyd, temp_train_set_x, temp_train_set_y, temp_train_set_d = train_data_reader.load_one_partition()
shared_valid_set_xyd, temp_valid_set_x, temp_valid_set_y, temp_valid_set_d = valid_data_reader.load_one_partition()
train_set_x, train_set_y, train_set_d = shared_train_set_xyd
valid_set_x, valid_set_y, valid_set_d = shared_valid_set_xyd
temp_train_set_f = label_normaliser.extract_durational_features(dur_data=temp_train_set_d)
temp_valid_set_f = label_normaliser.extract_durational_features(dur_data=temp_valid_set_d)
train_set_f = theano.shared(numpy.asarray(temp_train_set_f, dtype=theano.config.floatX), name='f', borrow=True)
valid_set_f = theano.shared(numpy.asarray(temp_valid_set_f, dtype=theano.config.floatX), name='f', borrow=True)
else:
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
train_set_x, train_set_y = shared_train_set_xy
valid_set_x, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
##temporally we use the training set as pretrain_set_x.
##we need to support any data for pretraining
# pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion
if model_type == 'DNN':
dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs,
L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, output_type=cfg.output_layer_type, network_type=cfg.network_type, dropout_rate = dropout_rate)
if cfg.network_type == 'S2S':
train_fn, valid_fn = dnn_model.build_finetune_functions_S2SPF(
(train_set_x, train_set_y, train_set_d, train_set_f), (valid_set_x, valid_set_y, valid_set_d, valid_set_f))
else:
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y)) #, batch_size=batch_size
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.time()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
# finetune_lr = 0.000125
previous_finetune_lr = finetune_lr
print(finetune_lr)
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.time()
while (not train_data_reader.is_finish()):
if cfg.network_type == 'S2S':
shared_train_set_xyd, temp_train_set_x, temp_train_set_y, temp_train_set_d = train_data_reader.load_one_partition()
temp_train_set_f = label_normaliser.extract_durational_features(dur_data=temp_train_set_d)
train_set_d.set_value(numpy.asarray(temp_train_set_d, dtype='int32'), borrow=True)
train_set_f.set_value(numpy.asarray(temp_train_set_f, dtype=theano.config.floatX), borrow=True)
else:
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
# if sequential training, the batch size will be the number of frames in an utterance
if sequential_training == True:
#batch_size = temp_train_set_x.shape[0]
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
this_train_error = train_fn(current_finetune_lr, current_momentum)
train_error.append(this_train_error)
#print train_set_x.eval().shape, train_set_y.eval().shape, this_train_error
else:
n_train_batches = temp_train_set_x.shape[0] / batch_size
for index in range(n_train_batches):
## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function
train_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
this_train_error = train_fn(current_finetune_lr, current_momentum)
train_error.append(this_train_error)
train_data_reader.reset()
logger.debug('calculating validation loss')
validation_losses = []
while (not valid_data_reader.is_finish()):
if cfg.network_type == 'S2S':
shared_valid_set_xyd, temp_valid_set_x, temp_valid_set_y, temp_valid_set_d = valid_data_reader.load_one_partition()
temp_valid_set_f = label_normaliser.extract_durational_features(dur_data=temp_valid_set_d)
valid_set_d.set_value(numpy.asarray(temp_valid_set_d, dtype='int32'), borrow=True)
valid_set_f.set_value(numpy.asarray(temp_valid_set_f, dtype=theano.config.floatX), borrow=True)
else:
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
valid_set_x.set_value(numpy.asarray(temp_valid_set_x, dtype=theano.config.floatX), borrow=True)
valid_set_y.set_value(numpy.asarray(temp_valid_set_y, dtype=theano.config.floatX), borrow=True)
this_valid_loss = valid_fn()
validation_losses.append(this_valid_loss)
valid_data_reader.reset()
this_validation_loss = numpy.mean(validation_losses)
this_train_valid_loss = numpy.mean(numpy.asarray(train_error))
sub_end_time = time.time()
loss_difference = this_validation_loss - previous_loss
logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
if epoch > 5:
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
# logger.debug('validation loss decreased, so saving model')
if this_validation_loss >= previous_loss:
logger.debug('validation loss increased')
# dbn = best_dnn_model
early_stop += 1
if epoch > 15 and early_stop > early_stop_epoch:
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
end_time = time.time()
# cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """Generate acoustic parameters for each input label file using a trained DNN.

    Parameters
    ----------
    valid_file_list : list of str
        Paths to binary input-label files (flat float32, frames of ``n_ins`` values).
    nnets_file_name : str
        Path to the pickled trained network (must expose ``parameter_prediction``).
    n_ins : int
        Input feature dimension per frame.
    n_outs : int
        Output feature dimension (unused here; kept for interface compatibility).
    out_file_list : list of str
        Output file paths, parallel to ``valid_file_list``, for the predicted
        parameters (written as raw float32).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')
    plotlogger = logging.getLogger("plotting")

    with open(nnets_file_name, 'rb') as model_fid:
        dnn_model = pickle.load(model_fid)

    file_number = len(valid_file_list)
    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i + 1, file_number, valid_file_list[i]))

        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # Trim any trailing partial frame. Integer division (//) is required:
        # in Python 3 the original '/' produced a float and broke the slice.
        features = features[:(n_ins * (features.size // n_ins))]
        test_set_x = features.reshape((-1, n_ins))

        predicted_parameter = dnn_model.parameter_prediction(test_set_x)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
def dnn_generation_S2S(valid_file_list, valid_dur_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """Generate acoustic parameters with a sequence-to-sequence (S2S) DNN.

    In addition to the linguistic label files, each utterance requires a
    parallel duration file from which frame-level durational features are
    derived before prediction.

    Parameters
    ----------
    valid_file_list : list of str
        Paths to binary input-label files (flat float32, frames of ``n_ins`` values).
    valid_dur_file_list : list of str
        Paths to binary duration files (float32, cast to int32), parallel to
        ``valid_file_list``.
    nnets_file_name : str
        Path to the pickled trained network (must expose
        ``parameter_prediction_S2SPF``).
    n_ins : int
        Input feature dimension per frame.
    n_outs : int
        Output feature dimension (unused here; kept for interface compatibility).
    out_file_list : list of str
        Output file paths for the predicted parameters (written as raw float32).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')
    plotlogger = logging.getLogger("plotting")

    with open(nnets_file_name, 'rb') as model_fid:
        dnn_model = pickle.load(model_fid)

    file_number = len(valid_file_list)
    # NOTE: relies on the module-level `cfg` object, as elsewhere in this file.
    label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, subphone_feats="coarse_coding")

    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i + 1, file_number, valid_file_list[i]))

        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # Trim any trailing partial frame. Integer division (//) is required:
        # in Python 3 the original '/' produced a float and broke the slice.
        features = features[:(n_ins * (features.size // n_ins))]
        test_set_x = features.reshape((-1, n_ins))

        with open(valid_dur_file_list[i], 'rb') as fid_lab:
            dur_values = numpy.fromfile(fid_lab, dtype=numpy.float32)
        test_set_d = dur_values.astype(numpy.int32)

        dur_features = label_normaliser.extract_durational_features(dur_data=test_set_d)
        test_set_f = dur_features.astype(numpy.float32)

        predicted_parameter = dnn_model.parameter_prediction_S2SPF(test_set_x, test_set_d, test_set_f)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
def dnn_generation_lstm(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """Generate acoustic parameters for each input label file using a trained LSTM.

    Same pipeline as ``dnn_generation`` but predicts via the model's
    ``parameter_prediction_lstm`` method and visualises the network first.

    Parameters
    ----------
    valid_file_list : list of str
        Paths to binary input-label files (flat float32, frames of ``n_ins`` values).
    nnets_file_name : str
        Path to the pickled trained network.
    n_ins : int
        Input feature dimension per frame.
    n_outs : int
        Output feature dimension (unused here; kept for interface compatibility).
    out_file_list : list of str
        Output file paths for the predicted parameters (written as raw float32).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')
    plotlogger = logging.getLogger("plotting")

    with open(nnets_file_name, 'rb') as model_fid:
        dnn_model = pickle.load(model_fid)
    visualize_dnn(dnn_model)

    file_number = len(valid_file_list)
    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i + 1, file_number, valid_file_list[i]))

        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # Trim any trailing partial frame. Integer division (//) is required:
        # in Python 3 the original '/' produced a float and broke the slice.
        features = features[:(n_ins * (features.size // n_ins))]
        test_set_x = features.reshape((-1, n_ins))

        predicted_parameter = dnn_model.parameter_prediction_lstm(test_set_x)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
## generate bottleneck-layer activations as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, bottleneck_index=None):
    """Extract bottleneck (hidden-layer) activations from a trained DNN.

    Parameters
    ----------
    valid_file_list : list of str
        Paths to binary input-label files (flat float32, frames of ``n_ins`` values).
    nnets_file_name : str
        Path to the pickled trained network (must expose
        ``generate_top_hidden_layer``).
    n_ins : int
        Input feature dimension per frame.
    n_outs : int
        Output feature dimension (unused here; kept for interface compatibility).
    out_file_list : list of str
        Output file paths for the extracted activations (written as raw float32).
    bottleneck_index : int, optional
        Index of the hidden layer to extract. The caller in this file passes a
        sixth positional argument, which the original signature did not accept;
        accepting it here (default ``None`` preserves the old behaviour).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')
    plotlogger = logging.getLogger("plotting")

    with open(nnets_file_name, 'rb') as model_fid:
        dnn_model = pickle.load(model_fid)

    file_number = len(valid_file_list)
    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i + 1, file_number, valid_file_list[i]))

        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # Trim any trailing partial frame. Integer division (//) is required:
        # in Python 3 the original '/' produced a float and broke the slice.
        features = features[:(n_ins * (features.size // n_ins))]
        features = features.reshape((-1, n_ins))
        temp_set_x = features.tolist()
        test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))

        if bottleneck_index is None:
            predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
        else:
            # NOTE(review): assumes the model's generate_top_hidden_layer takes
            # the layer index as its second positional argument — confirm
            # against the network implementation.
            predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x, bottleneck_index)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layer_size = cfg.hyper_params['hidden_layer_size']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of lingustic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
dur_file_list = prepare_file_path_list(file_id_list, cfg.in_dur_dir, cfg.dur_ext)
seq_dur_file_list = prepare_file_path_list(file_id_list, cfg.in_seq_dur_dir, cfg.dur_ext)
lf0_file_list = prepare_file_path_list(file_id_list, cfg.in_lf0_dir, cfg.lf0_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s_%d.dat' %(cfg.label_style, lab_dim)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.GenTestList:
try:
test_id_list = read_file_list(cfg.test_id_scp)
logger.debug('Loaded file id list from %s' % cfg.test_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.test_id_scp)
raise
in_label_align_file_list = prepare_file_path_list(test_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(test_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(test_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(test_id_list, nn_label_norm_dir, cfg.lab_ext)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list, label_type=cfg.label_type)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
if cfg.GenTestList:
min_max_normaliser.load_min_max_values(label_norm_file)
else:
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
### make duration data for S2S network ###
if cfg.network_type == "S2S":
logger.info('creating duration (input) features for S2S network')
label_normaliser.prepare_dur_data(in_label_align_file_list, seq_dur_file_list, feature_type="numerical", unit_size="phoneme")
if cfg.remove_silence_from_dur:
remover = SilenceRemover(n_cmp = cfg.seq_dur_dim, silence_pattern = cfg.silence_pattern, remove_frame_features = cfg.add_frame_features)
remover.remove_silence(seq_dur_file_list, in_label_align_file_list, seq_dur_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None and not cfg.GenTestList:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output duration data
if cfg.MAKEDUR:
logger.info('creating duration (output) features')
feature_type = cfg.dur_feature_type
label_normaliser.prepare_dur_data(in_label_align_file_list, dur_file_list, feature_type)
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = cfg.delta_win #[-0.5, 0.0, 0.5]
acc_win = cfg.acc_win #[1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
if 'dur' in list(cfg.in_dir_dict.keys()) and cfg.AcousticModel:
acoustic_worker.make_equal_frames(dur_file_list, lf0_file_list, cfg.in_dimension_dict)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,
binary_label_file_list, lab_dim, silence_feature)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
in_label_align_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number]) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number])
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_var_vector = feature_std_vector**2
feature_var_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layer_size))
for hid_size in hidden_layer_size:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.%f.rnn.model' \
%(model_dir, cfg.combined_model_name, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number, cfg.hyper_params['learning_rate'])
### DNN model training
if cfg.TRAINDNN:
var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)
logger.info('training DNN')
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_mean_vector = cmp_min_max[0, ]
cmp_std_vector = cmp_min_max[1, ]
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector, seq_dur_file_list=seq_dur_file_list)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
if cfg.GENBNFEA:
'''
Please only tune on this step when you want to generate bottleneck features from DNN
'''
temp_dir_name = '%s_%s_%d_%d_%d_%d_%s_hidden' \
%(cfg.model_type, cfg.combined_feature_name, \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), combined_model_arch)
gen_dir = os.path.join(gen_dir, temp_dir_name)
bottleneck_size = min(hidden_layers_sizes)
bottleneck_index = 0
for i in range(len(hidden_layers_sizes)):
if hidden_layers_sizes(i) == bottleneck_size:
bottleneck_index = i
logger.info('generating bottleneck features from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_d_file_list = seq_dur_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)
### generate parameters from DNN
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d_%d' \
%(cfg.combined_model_name, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layer_size), hidden_layer_size[0], hidden_layer_size[-1])
gen_dir = os.path.join(gen_dir, temp_dir_name)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_d_file_list = seq_dur_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.GenTestList:
gen_file_id_list = test_id_list
test_x_file_list = nn_label_norm_file_list
test_d_file_list = seq_dur_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
if cfg.network_type == "S2S":
dnn_generation_S2S(test_x_file_list, test_d_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
else:
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
if cfg.AcousticModel:
##perform MLPG to smooth parameter trajectory
## lf0 is included, the output features much have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG)
if cfg.DurationModel:
### Perform duration normalization(min. state dur set to 1) ###
gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext)
gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext)
in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)
label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern)
label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
print(len(gen_file_id_list))
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list, cfg) # reference copy synthesis speech
### setting back to original conditions before calculating objective scores ###
if cfg.GenTestList:
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
### evaluation: RMSE and CORR for duration
if cfg.CALMCD and cfg.DurationModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features)
remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list)
valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(valid_dur_rmse, valid_dur_corr))
logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(test_dur_rmse, test_dur_corr))
### evaluation: calculate distortion
if cfg.CALMCD and cfg.AcousticModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_f0_corr, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_f0_corr, test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.))
if __name__ == '__main__':
    # these things should be done even before trying to parse the command line
    # create a configuration instance
    # and get a short name for this instance
    cfg=configuration.cfg

    # set up logging to use our custom class
    logging.setLoggerClass(LoggerPlotter)

    # get a logger for this main function
    logger = logging.getLogger("main")

    # exactly one argument is expected: the path to the configuration file
    if len(sys.argv) != 2:
        logger.critical('usage: run_dnn.sh [config file name]')
        sys.exit(1)

    config_file = sys.argv[1]

    # normalise to an absolute path so later relative-path handling cannot break
    config_file = os.path.abspath(config_file)
    cfg.configure(config_file)

    if cfg.profile:
        # run the whole pipeline under cProfile and report the hot spots
        logger.info('profiling is activated')
        import cProfile, pstats
        cProfile.run('main_function(cfg)', 'mainstats')

        # create a stream for the profiler to write to
        profiling_output = io.StringIO()
        p = pstats.Stats('mainstats', stream=profiling_output)

        # print stats to that stream
        # here we just report the top 10 functions, sorted by total amount of time spent in each
        p.strip_dirs().sort_stats('tottime').print_stats(10)

        # print the result to the log
        logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
        profiling_output.close()
        logger.info('---End of profiling result---')
    else:
        main_function(cfg)

    # if gnp._boardId is not None:
    #     import gpu_lock
    #     gpu_lock.free_lock(gnp._boardId)

    sys.exit(0)
| bajibabu/merlin | src/work_in_progress/run_merlin_v2.py | Python | apache-2.0 | 58,040 | [
"NEURON"
] | ac97443739432a8473ba697444d27c264c134a56611224c5c6e07aacef882b37 |
import vtk
from math import sqrt
from vtk.test import Testing

# Regression test: cut an unstructured grid containing one quadratic and one
# linear hexahedron with a plane, once producing triangles and once polygons,
# and render the input mesh next to both cut results.

# Cutting plane: horizontal (+z normal) at z=9.8, just below the perturbed
# top face of the hexahedra defined below.
plane = vtk.vtkPlane()
plane.SetOrigin(5, 5, 9.8)
plane.SetNormal(0, 0, 1)

# 20 points of a quadratic hexahedron: 8 corners followed by 12 mid-edge
# nodes (some mid-edge z values are perturbed so the cut is non-trivial).
coords = [(0,0,0),(10,0,0),(10,10,0),(0,10,0),
          (0,0,10),(10,0,10),(10,10,10),(0,10,10),
          (5,0,0),(10,5,0),(5,10,0),(0,5,0),
          (5,0,9.5),(10,5,9.3),(5,10,9.5),(0,5,9.3),
          (0,0,5),(10,0,5),(10,10,5),(0,10,5)]

data = vtk.vtkFloatArray()
points = vtk.vtkPoints()
ptIds = vtk.vtkIdList()

mesh = vtk.vtkUnstructuredGrid()
mesh.SetPoints(points)
mesh.GetPointData().SetScalars(data)

# First cell: the 20-node quadratic hexahedron; scalar = distance from origin.
# (Loop variable renamed from `id`, which shadowed the Python builtin.)
for ptId in range(0, 20):
    x, y, z = coords[ptId]
    ptIds.InsertNextId(ptId)
    points.InsertNextPoint(x, y, z)
    data.InsertNextValue(sqrt(x*x + y*y + z*z))
mesh.InsertNextCell(vtk.VTK_QUADRATIC_HEXAHEDRON, ptIds)

# Second cell: a linear hexahedron built from the 8 corner points, offset by
# +20 in x and y so the two cells do not overlap.
ptIds.Reset()
for ptId in range(0, 8):
    x = coords[ptId][0] + 20
    y = coords[ptId][1] + 20
    z = coords[ptId][2]
    ptIds.InsertNextId(ptId + 20)
    points.InsertNextPoint(x, y, z)
    data.InsertNextValue(sqrt(x*x + y*y + z*z))
mesh.InsertNextCell(vtk.VTK_HEXAHEDRON, ptIds)

print( "Mesh bounding box : ({0})".format( mesh.GetPoints().GetBounds() ) )

# Cut once with triangle generation enabled ...
triCutter = vtk.vtkCutter()
triCutter.SetInputData(mesh)
triCutter.SetCutFunction(plane)
triCutter.GenerateTrianglesOn()
triCutter.Update()

print( "Triangle cutter bounding box : ({0})".format( triCutter.GetOutput().GetPoints().GetBounds() ) )

# ... and once with polygon output (triangulation disabled).
polyCutter = vtk.vtkCutter()
polyCutter.SetInputData(mesh)
polyCutter.SetCutFunction(plane)
polyCutter.GenerateTrianglesOff()
polyCutter.Update()

print( "Polygon cutter bounding box : ({0})".format( polyCutter.GetOutput().GetPoints().GetBounds() ) )

# Display input and output side-by-side:
# left half = input mesh, top-right = triangle cut, bottom-right = polygon cut.
meshMapper = vtk.vtkDataSetMapper()
meshMapper.SetInputData(mesh)
meshActor = vtk.vtkActor()
meshActor.SetMapper(meshMapper)
meshRen = vtk.vtkRenderer()
meshRen.AddActor(meshActor)
meshRen.SetViewport(0.0, 0.0, 0.5, 1.0)

triCutMapper = vtk.vtkPolyDataMapper()
triCutMapper.SetInputData(triCutter.GetOutput())
triCutActor = vtk.vtkActor()
triCutActor.SetMapper(triCutMapper)
# white edges make the triangulation visible against the surface
triCutActor.GetProperty().EdgeVisibilityOn()
triCutActor.GetProperty().SetEdgeColor(1, 1, 1)
triCutRen = vtk.vtkRenderer()
triCutRen.AddActor(triCutActor)
triCutRen.SetViewport(0.5, 0.5, 1.0, 1.0)

polyCutMapper = vtk.vtkPolyDataMapper()
polyCutMapper.SetInputData(polyCutter.GetOutput())
polyCutActor = vtk.vtkActor()
polyCutActor.SetMapper(polyCutMapper)
polyCutActor.GetProperty().EdgeVisibilityOn()
polyCutActor.GetProperty().SetEdgeColor(1, 1, 1)
polyCutRen = vtk.vtkRenderer()
polyCutRen.AddActor(polyCutActor)
polyCutRen.SetViewport(0.5, 0.0, 1.0, 0.5)

renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(meshRen)
renWin.AddRenderer(triCutRen)
renWin.AddRenderer(polyCutRen)
renWin.SetSize(800, 400)

iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
iren.Initialize()
| hlzz/dotfiles | graphics/VTK-7.0.0/Filters/Core/Testing/Python/cutPolygons.py | Python | bsd-3-clause | 2,949 | [
"VTK"
] | a8e92b2189be7cbb51b1e82cec1ae6a2aff51af6f080230245da246ca95c9762 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import itertools
import numpy as np
from mdtraj.utils import ensure_type, cast_indices, in_units_of
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.utils.six import string_types
from mdtraj.utils.six.moves import xrange
__all__ = ['ArcTrajectoryFile', 'load_arc']
##############################################################################
# Classes
##############################################################################
class _EOF(IOError):
    """Internal sentinel raised when the end of an .arc file is reached
    while trying to read another frame."""
@_FormatRegistry.register_loader('.arc')
def load_arc(filename, top=None, stride=None, atom_indices=None):
    """Load a TINKER .arc file from disk.

    Parameters
    ----------
    filename : str
        String filename of TINKER .arc file.
    top : {str, Trajectory, Topology}
        The .arc format does not contain topology information. Pass in either
        the path to a pdb file, a trajectory, or a topology to supply this
        information.
    stride : int, default=None
        Only read every stride-th frame
    atom_indices : array_like, optional
        If not none, then read only a subset of the atoms coordinates from the
        file.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.

    See Also
    --------
    mdtraj.ArcTrajectoryFile : Low level interface to TINKER .arc files
    """
    from mdtraj.trajectory import _parse_topology, Trajectory

    # we make it not required in the signature, but required here. although this
    # is a little weird, its good because this function is usually called by a
    # dispatch from load(), where top comes from **kwargs. So if its not supplied
    # we want to give the user an informative error message
    if top is None:
        raise ValueError('"top" argument is required for load_arc')

    if not isinstance(filename, string_types):
        raise TypeError('filename must be of type string for load_arc. '
                        'you supplied %s' % type(filename))

    topology = _parse_topology(top)
    # FIX: this previously called the undefined name `_cast_indices`, which
    # raised NameError on every call; the helper imported from mdtraj.utils
    # is `cast_indices`.
    atom_indices = cast_indices(atom_indices)
    if atom_indices is not None:
        topology = topology.subset(atom_indices)

    with ArcTrajectoryFile(filename) as f:
        xyz = f.read(stride=stride, atom_indices=atom_indices)
        in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)

    time = np.arange(len(xyz))
    if stride is not None:
        # if we loaded with a stride, the Trajectory's time field should
        # respect that
        time *= stride

    t = Trajectory(xyz=xyz, topology=topology, time=time)
    return t
@_FormatRegistry.register_fileobject('.arc')
class ArcTrajectoryFile(object):
    """Interface for reading TINKER archive files.

    (Note that the TINKER .xyz format is identical to this.) This is
    a file-like object for reading; writing is not implemented because
    TINKER files carry format-specific numeric atom types. It implements
    the context manager protocol, so you can also use it with the python
    'with' statement.

    The conventional units in the arc file is angstrom. The format only
    supports storing the cartesian coordinates and box lengths.

    Parameters
    ----------
    filename : str
        The filename to open. A path to a file on disk.
    mode : {'r'}
        The mode in which to open the file, only 'r' for read is supported.
    force_overwrite : bool
        Unused; kept for signature compatibility with the other
        trajectory-file classes.
    """
    distance_unit = 'angstroms'

    def __init__(self, filename, mode='r', force_overwrite=True):
        """Open an TINKER.arc file for reading/writing.
        """
        self._is_open = False
        self._filename = filename
        self._mode = mode

        if mode == 'w':
            raise ValueError('Writing TINKER .arc files is not supported at this time')

        # track which line we're on. this is not essential, but its useful
        # when reporting errors to the user to say what line it occured on.
        self._line_counter = 0

        if mode == 'r':
            if not os.path.exists(filename):
                raise IOError("The file '%s' doesn't exist" % filename)
            self._fh = open(filename, 'r')
            self._is_open = True
        else:
            raise ValueError('mode must be "r". '
                             'you supplied "%s"' % mode)

    def seek(self, offset, whence=0):
        """Move to a new file position

        Parameters
        ----------
        offset : int
            A number of frames.
        whence : {0, 1, 2}
            0: offset from start of file, offset should be >=0.
            1: move relative to the current position, positive or negative
            2: move relative to the end of file, offset should be <= 0.
            Seeking beyond the end of a file is not supported
        """
        raise NotImplementedError()

    def tell(self):
        """Current file position

        Returns
        -------
        offset : int
            The current frame in the file.
        """
        raise NotImplementedError()

    def close(self):
        """Close the .arc file"""
        if self._is_open:
            self._fh.close()
            self._is_open = False

    def __del__(self):
        self.close()

    def __enter__(self):
        "Support the context manager protocol"
        return self

    def __exit__(self, *exc_info):
        "Support the context manager protocol"
        self.close()

    def __len__(self):
        "Number of frames in the file"
        raise NotImplementedError()

    def read(self, n_frames=None, stride=None, atom_indices=None):
        """Read data from a TINKER .arc file. Note that only the
        Cartesian coordinates are read in. The .arc file also
        contains TINKER-specific numeric atom types and some bonding
        information, which we do not read in.

        Parameters
        ----------
        n_frames : int, None
            The number of frames you would like to read from the file.
            If None, all of the remaining frames will be loaded.
        stride : np.ndarray, optional
            Read only every stride-th frame.
        atom_indices : array_like, optional
            If not none, then read only a subset of the atoms coordinates
            from the file.

        Returns
        -------
        xyz : np.ndarray, shape=(n_frames, n_atoms, 3), dtype=np.float32
            The cartesian coordinates, in angstroms
        """
        if not self._mode == 'r':
            raise ValueError('read() is only available when file is opened '
                             'in mode="r"')

        if n_frames is None:
            frame_counter = itertools.count()
        else:
            # `range` works identically on Python 2 and 3 here
            frame_counter = range(n_frames)

        if stride is None:
            stride = 1

        coords = []
        for i in frame_counter:
            try:
                coord = self._read()
                if atom_indices is not None:
                    coord = coord[atom_indices, :]
            except _EOF:
                break
            coords.append(coord)
            # throw away the frames skipped over by `stride`
            # FIX: previously an _EOF raised while discarding these frames
            # propagated out of read(); now it simply terminates the read,
            # so a stride that runs past the last frame no longer raises.
            try:
                for _ in range(stride - 1):
                    self._read()
            except _EOF:
                break

        coords = np.array(coords)
        return coords

    def _read(self):
        """Read a single frame and return its coordinates.

        Raises
        ------
        _EOF
            When the end of the file is reached.
        """
        i = 0

        # First line of a frame: "<n_atoms> [optional title]"
        line = self._fh.readline()
        if line == '':
            raise _EOF()
        self._n_atoms = int(line.split()[0])
        self._line_counter += 1

        coords = np.empty((self._n_atoms, 3), dtype=np.float32)
        while i < self._n_atoms:
            line = self._fh.readline()
            s = line.split()
            # per-atom line layout: index, name, x, y, z, [type, bonds...];
            # only columns 2-4 (the coordinates) are kept
            coords[i,:] = [float(s[pos]) for pos in [2, 3, 4]]
            i += 1
            self._line_counter += 1
        return coords

    def write(self, xyz):
        """ The ArcTrajectoryFile does not have a write method,
        because TINKER .arc files have special numerical atom types
        which are not shared by any other trajectory file format.

        Parameters
        ----------
        xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
            The cartesian coordinates of the atoms to write.
        """
        raise RuntimeError('write() is not available for .arc files')
| marscher/mdtraj | MDTraj/formats/arc.py | Python | lgpl-2.1 | 9,800 | [
"MDTraj",
"TINKER"
] | 10a37f3107c6eab0c8ea857cfbefa8560f66399fd9276cbaf0271f0497f9fceb |
# -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2016-03-26 22:42:39
# @Last modified by: LC
# @Last Modified time: 2016-04-11 15:22:41
# @Email: liangchaowu5@gmail.com
# 功能:单线程下载搜狗输入法的词库,使用时把主函数中的baseDir改成自己的下载目录即可,注意baseDir末尾不能有/
import urllib2
import Queue
import re
import os
import time
import downloadSingleFile
import getCategory
def downloadSingleCate(cateID,dir,logFile):
pageBaseUrl = 'http://pinyin.sogou.com/dict/cate/index/%s' % cateID
fileBaseUrl = 'http://download.pinyin.sogou.com'
pagePattern = re.compile(r'href="/dict/cate/index/%s/default(.*?)"' % cateID) # 非贪婪匹配,查找跳转到其他页面的url
filePattern = re.compile(r'href="http://download.pinyin.sogou.com(.*?)"') # 非贪婪匹配,查找可下载的文件
visited = [] # 记录某个url是否已经被访问了
downloaded = [] # 记录某个文件是否被下载了
queue = Queue.Queue() # 创建一个FIFO队列,用于存放待遍历的url(bfs)
# bfs 查找所有的url,队列不为空时可以一直遍历
queue.put(pageBaseUrl) # 将当前页面也就是访问的第一个页面放到队列中
while not queue.empty():
currentURL = queue.get()
if currentURL in visited:
continue
else:
visited.append(currentURL)
try:
response = urllib2.urlopen(currentURL)
data = response.read()
except urllib2.HTTPError, e:
with open(logFile.decode('utf8'), 'a') as f:
f.write(str(e.code)+' error while parsing page of '+currentURL+'\n')
except:
with open(logFile.decode('utf8'), 'a') as f:
f.write('unexcepted error while parsing page of '+currentURL+'\n')
pageResult = re.findall(pagePattern, data)
for i in range(len(pageResult)):
queue.put(pageBaseUrl + '/default' + pageResult[i])
# 查找并下载文件
# 指定下载目录,目录不存在时自动创建,需要在前面加上u,指定编码为utf-8
if not os.path.exists(dir.decode('utf8')): # dir 为str类型,但是创建目录# 必须要用
os.makedirs(dir.decode('utf8')) # 创建多层目录
fileResult = re.findall(filePattern, data)
for later in fileResult:
fileURL = fileBaseUrl+later
if fileURL in downloaded:
continue
else:
downloaded.append(fileURL)
print fileURL+' downloading.......'
downloadSingleFile.downLoadSingleFile(fileURL, dir, logFile)
for visit in visited:
print visit
if __name__ == '__main__':
    start = time.time()
    # Fetch the category tree from the Sogou site: big-category id -> name,
    # and per-big-category small-category id -> name.
    bigCateDict, smallCateDict = getCategory.getSogouDictCate()
    # Root of the download tree; one sub-directory per big/small category pair.
    baseDir = 'G:/搜狗词库/单线程下载'
    logFile = baseDir+'/download.log'
    # Walk every (big, small) category pair and download its dictionaries.
    for i in bigCateDict:
        for j in smallCateDict[i]:
            downloadDir = baseDir+'/%s/%s/' %(bigCateDict[i],smallCateDict[i][j])
            downloadSingleCate(int(j), downloadDir, logFile)
print 'process time:%s' % (time.time() - start) | ltf/lab | plab/thesaurus/spider/SougouThesaurusSpider/singleThreadDownload.py | Python | gpl-3.0 | 3,214 | [
"VisIt"
] | e851fd9408ba1f87e7b0c8a2b5bb6fb6661765fb2425776d9bcbec136d0580b5 |
# -*- coding: utf-8 -*-
"""
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller but don't try to make the life
for the template designer too hard by giving him too few functionality.
For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
"""
import re
import ast
from setuptools import setup
# Matches the `__version__ = '...'` assignment inside jinja2/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

# Extract the version from the package source so it is defined in exactly
# one place; literal_eval safely evaluates the quoted string literal.
with open('jinja2/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

setup(
    name='Jinja2',
    version=version,
    url='http://jinja.pocoo.org/',
    license='BSD',
    author='Armin Ronacher',
    author_email='armin.ronacher@active-4.com',
    description='A small but fast and easy to use stand-alone template '
                'engine written in pure python.',
    long_description=__doc__,
    # jinja is egg safe. But we hate eggs
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Markup :: HTML'
    ],
    packages=['jinja2'],
    install_requires=['MarkupSafe'],
    # optional i18n support pulls in Babel
    extras_require={'i18n': ['Babel>=0.8']},
    include_package_data=True,
    # register the template extractor with Babel's plugin system
    entry_points="""
    [babel.extractors]
    jinja2 = jinja2.ext:babel_extract[i18n]
    """
)
| patricmutwiri/jinja2 | setup.py | Python | bsd-3-clause | 2,614 | [
"VisIt"
] | 1db89241e6f21af26ff16089bc7d1d8f3d992274b21125b47f6929fc62a59900 |
# encoding: utf-8
"""
filters.py -- Custom signal filtering functions
Exported namespace: amplitude_envelope, halfwave, quick_boxcar, circular_blur
Created by Joe Monaco on 2007-11-15. Updated 2009-09-11.
Copyright (c) 2007-2009 Columbia University. All rights reserved.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
from numpy import r_, empty, zeros, ceil, trapz, ndarray, squeeze, sqrt
from scipy.signal import gaussian, convolve, hilbert
def amplitude_envelope(x):
    """Compute the amplitude envelope of a time-varying sinusoid signal

    Pass in multiple signals as a matrix with time along the second axis.
    Returns an array of the same shape as the input (1-D input yields a
    1-D envelope), computed as the magnitude of the analytic signal.
    """
    if x.ndim == 1:
        x = x.reshape((1, x.size))
    # FIX: this was `N.empty(...)`, but no `N` alias exists in this module;
    # `empty` is imported directly from numpy at the top of the file.
    A = empty(x.shape, 'd')
    for i in range(x.shape[0]):  # `range`: works on both Python 2 and 3
        x_a = hilbert(x[i])
        # envelope = |analytic signal|
        A[i] = sqrt(x_a.real**2 + x_a.imag**2)
    return squeeze(A)
def halfwave(x, copy=False):
    """Half-wave rectifier for arrays or scalars

    Negative values are clamped to zero.  Array input is rectified in place
    and returned, unless copy=True, in which case the input array is left
    untouched and a rectified copy is returned.  Scalar input is converted
    to float first.
    """
    if type(x) is ndarray and x.ndim:
        target = x.copy() if copy else x
        target[target < 0.0] = 0.0
        return target
    value = float(x)
    if value < 0.0:
        value = 0.0
    return value
def quick_boxcar(s, M=4, centered=False):
    """Returns a boxcar-filtered version of the input signal

    Keyword arguments:
    M -- number of averaged samples (default 4)
    centered -- recenter the filtered signal to reduce lag (default False)

    Raises ValueError if the signal is not longer than twice the window.
    """
    # Sanity check on signal and filter window
    length = s.shape[0]
    if length <= 2 * M:
        # modernized from the Python-2-only `raise ValueError, msg` form;
        # this call syntax behaves identically on Python 2 and 3
        raise ValueError('signal too short for specified filter window')

    # Set up staggered arrays for a vectorized moving average: row i is the
    # signal shifted by i samples, padded with the first/last sample values.
    z = empty((M, length + M - 1), 'd')
    for i in range(M):
        z[i] = r_[zeros(i) + s[0], s, zeros(M - i - 1) + s[-1]]

    # Center the average if specified (shifts the output window by M/2)
    start_ix = 0
    end_ix = length
    if centered:
        start_ix += int(M / 2)
        end_ix += int(M / 2)

    return z.mean(axis=0)[start_ix:end_ix]
def circular_blur(s, blur_width):
    """Return a wrapped gaussian smoothed (blur_width in degrees) signal for
    data binned on a full circle range [0, 2PI/360).
    """
    bins = s.shape[0]
    width = blur_width / (360.0 / bins)  # blur width expressed in bins
    # FIX: numpy's ceil returns a float, which cannot be used as a window
    # length or slice bound on Python 3 -- cast to int explicitly.
    size = int(ceil(8 * width))
    if size > bins:
        size = bins
    # NOTE(review): for very small blur_width, size can reach 0, making
    # s[-size:] the whole signal and the final slice empty; the original
    # code behaves the same way -- confirm callers never pass such widths
    # before adding a guard.
    wrapped = r_[s[-size:], s, s[:size]]  # pad both ends with wrapped data
    G = gaussian(size, width)
    G /= trapz(G)  # normalize the kernel to unit area
    S = convolve(wrapped, G, mode='same')
    return S[size:-size]  # drop the wrapped padding
| jdmonaco/vmo-feedback-model | src/tools/filters.py | Python | mit | 2,661 | [
"Gaussian"
] | 61d18dd539766b125fd1a96e7dfbfc64ca81c17ab75bab80d57bdca617c4ddb8 |
# General MIDI Level 1 instrument sound set: tuple index + 1 is the GM
# program number, so _GM_PROGRAM_NAMES[0] is program 1.
_GM_PROGRAM_NAMES = (
    "Acoustic Grand Piano",
    "Bright Acoustic Piano",
    "Electric Grand Piano",
    "Honky-tonk Piano",
    "Electric Piano 1",
    "Electric Piano 2",
    "Harpsichord",
    "Clavinet",
    "Celesta",
    "Glockenspiel",
    "Music Box",
    "Vibraphone",
    "Marimba",
    "Xylophone",
    "Tubular Bells",
    "Dulcimer",
    "Drawbar Organ",
    "Percussive Organ",
    "Rock Organ",
    "Church Organ",
    "Reed Organ",
    "Accordion",
    "Harmonica",
    "Tango Accordion",
    "Acoustic Guitar (nylon)",
    "Acoustic Guitar (steel)",
    "Electric Guitar (jazz)",
    "Electric Guitar (clean)",
    "Electric Guitar (muted)",
    "Overdriven Guitar",
    "Distortion Guitar",
    "Guitar harmonics",
    "Acoustic Bass",
    "Electric Bass (finger)",
    "Electric Bass (pick)",
    "Fretless Bass",
    "Slap Bass 1",
    "Slap Bass 2",
    "Synth Bass 1",
    "Synth Bass 2",
    "Violin",
    "Viola",
    "Cello",
    "Contrabass",
    "Tremolo Strings",
    "Pizzicato Strings",
    "Orchestral Harp",
    "Timpani",
    "String Ensemble 1",
    "String Ensemble 2",
    "SynthStrings 1",
    "SynthStrings 2",
    "Choir Aahs",
    "Voice Oohs",
    "Synth Voice",
    "Orchestra Hit",
    "Trumpet",
    "Trombone",
    "Tuba",
    "Muted Trumpet",
    "French Horn",
    "Brass Section",
    "SynthBrass 1",
    "SynthBrass 2",
    "Soprano Sax",
    "Alto Sax",
    "Tenor Sax",
    "Baritone Sax",
    "Oboe",
    "English Horn",
    "Bassoon",
    "Clarinet",
    "Piccolo",
    "Flute",
    "Recorder",
    "Pan Flute",
    "Blown Bottle",
    "Shakuhachi",
    "Whistle",
    "Ocarina",
    "Lead 1 (square)",
    "Lead 2 (sawtooth)",
    "Lead 3 (calliope)",
    "Lead 4 (chiff)",
    "Lead 5 (charang)",
    "Lead 6 (voice)",
    "Lead 7 (fifths)",
    "Lead 8 (bass + lead)",
    "Pad 1 (new age)",
    "Pad 2 (warm)",
    "Pad 3 (polysynth)",
    "Pad 4 (choir)",
    "Pad 5 (bowed)",
    "Pad 6 (metallic)",
    "Pad 7 (halo)",
    "Pad 8 (sweep)",
    "FX 1 (rain)",
    "FX 2 (soundtrack)",
    "FX 3 (crystal)",
    "FX 4 (atmosphere)",
    "FX 5 (brightness)",
    "FX 6 (goblins)",
    "FX 7 (echoes)",
    "FX 8 (sci-fi)",
    "Sitar",
    "Banjo",
    "Shamisen",
    "Koto",
    "Kalimba",
    "Bag pipe",
    "Fiddle",
    "Shanai",
    "Tinkle Bell",
    "Agogo",
    "Steel Drums",
    "Woodblock",
    "Taiko Drum",
    "Melodic Tom",
    "Synth Drum",
    "Reverse Cymbal",
    "Guitar Fret Noise",
    "Breath Noise",
    "Seashore",
    "Bird Tweet",
    "Telephone Ring",
    "Helicopter",
    "Applause",
    "Gunshot",
)

# Mapping of GM program number (1-128) to instrument name.
general_midi = dict(enumerate(_GM_PROGRAM_NAMES, start=1))
| JasonFruit/abcviewer | abcv/general_midi.py | Python | mit | 3,121 | [
"CRYSTAL"
] | 1a71adebfca2315446382bc61de99f5769d9f3642ca9db8f7327f70b4c960baa |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import sys

# py2app walks the module dependency graph recursively; the default
# recursion limit is too low for a package graph this size.
sys.setrecursionlimit(1500)

VERSION = '2.2.x'

# Add VTK 6.2 to path
# NOTE(review): these build paths are hard-coded to one machine's layout;
# anyone else producing the bundle must adjust them.
sys.path.insert(0,'/Users/vistrails/src/VTK6/build/Wrapping/Python')
sys.path.insert(0,'/Users/vistrails/src/VTK6/build/lib/')

# Info.plist entries embedded into the generated .app bundle
plist = dict(
    CFBundleName='VisTrails',
    CFBundleShortVersionString=VERSION,
    CFBundleGetInfoString=' '.join(['VisTrails', VERSION]),
    CFBundleExecutable='vistrails',
    CFBundleIdentifier='org.vistrails',
)

# make the vistrails package importable while py2app analyses the app
sys.path.append('../../..')
APP = ['../../../vistrails/run.py']

#comma-separated list of additional data files and
#folders to include (not for code!)
#DATA_FILES = ['/usr/local/graphviz-2.12/bin/dot',]

#removed gridifield: gridfield, gridfield.core, gridfield.algebra, gridfield.gfvis, gridfield.selfe, \
# py2app options: modules to force-include, whole packages to bundle, and
# packages to exclude from the dependency walk.
OPTIONS = {'argv_emulation': True,
           'iconfile': 'resources/vistrails_icon.icns',
           'includes': 'sip,pylab,xml,libxml2,libxslt,Cookie,BaseHTTPServer,\
multifile,shelve,uuid,sine,st,Numeric,pexpect,\
sqlite3,suds,shapelib,dbflib,\
mpl_toolkits.mplot3d,_mysql_exceptions,readline,\
HTMLParser,sqlalchemy,sqlalchemy.dialects.sqlite,\
sqlalchemy.dialects.mysql,sqlalchemy.dialects.postgresql,\
sqlalchemy.dialects.firebird,sqlalchemy.dialects.mssql,\
sqlalchemy.dialects.oracle,sqlalchemy.dialects.sybase,\
sqlalchemy.dialects.drizzle,certifi,backports.ssl_match_hostname,\
tej',
           'packages': 'PyQt4,vtk,MySQLdb,matplotlib,vistrails,numpy,scipy,\
api,twisted,Scientific,distutils,h5py,batchq,osgeo,\
nose,IPython,zmq,pygments,pyth,psycopg2,remoteq,\
file_archive,sklearn',
           'excludes': 'mpl_toolkits.basemap,PyQt4.uic,PyQt4.uic.Compiler,\
PyQt4.uic.Loader,PyQt4.uic.port_v2,PyQt4.uic.port_v3',
           'plist': plist,
           }

setup(
    app=APP,
    # data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
| hjanime/VisTrails | scripts/dist/mac/setup.py | Python | bsd-3-clause | 4,169 | [
"VTK"
] | f63c18875adde9a8b723783640b25632363a6839572aecf52d09994cab6940e0 |
import moose
from IPython import embed
# Try to connect pool to species
pool = moose.Pool('/pool')
species = moose.Species('/species')
# Drop into an interactive IPython shell before the assignment below runs,
# so the freshly created objects can be inspected by hand.
embed()
# Attach the pool to the species element.
species.pool = pool
| rahulgayatri23/moose-core | python/libmumbl/test/test1.py | Python | gpl-3.0 | 163 | [
"MOOSE"
] | 75df22b10383b5b901f539ef18d1112a9eb95552115dee828bfefda94094a2fc |
#!/usr/bin/env python
import sys
import logging
import argparse
from Bio import SeqIO
from Bio.Data import CodonTable
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def translate(fasta_file, target="protein", table=11, strip_stops=False, met=False):
    """Translate (or transcribe) every record in a FASTA file to stdout.

    Parameters
    ----------
    fasta_file : file-like
        Open handle to a FASTA file of nucleotide sequences.
    target : str
        ``"protein"`` translates each sequence; any other value transcribes
        DNA to RNA instead.
    table : int
        NCBI translation table number (default 11).
    strip_stops : bool
        If True, do not append a trailing ``*`` stop character.
    met : bool
        If True, force the first residue to methionine (``M``).
    """
    records = list(SeqIO.parse(fasta_file, "fasta"))
    for record in records:
        if target == "protein":
            # Trim trailing bases so the sequence length is a multiple of 3.
            mod = len(record.seq) % 3
            if mod != 0:
                record.seq = record.seq[0:-mod]
            # Read http://biopython.org/DIST/docs/api/Bio.Seq.Seq-class.html#transcribe
            # for valid CDS conditions.
            # Will first try to translate sequence as a CDS,
            # then just as a sequence if this fails.
            try:
                tmpseq = record.seq.translate(table=table, cds=True)
            except CodonTable.TranslationError as cte:
                log.info("Translation issue at %s: %s", record.id, cte)
                tmpseq = record.seq.translate(table=table, cds=False)
            # check if stop in middle of protein
            if "*" in tmpseq:
                # NOTE(review): the logged "new length" (3 * len(tmpseq) - 3)
                # assumes the stop is the final codon; the trim below cuts at
                # the FIRST stop, so the logged number can differ -- confirm.
                log.info(
                    "Trimming %s from %s to %s due to stop codons",
                    record.id,
                    len(record.seq),
                    3 * len(tmpseq) - 3,
                )
                tmpseq = tmpseq[0 : str(tmpseq).index("*")]
            # add stop to end if strip_stops=False
            if not strip_stops:
                tmpseq = tmpseq + "*"
            if met:
                # Replace the first residue with an initiator methionine.
                tmpseq = "M" + tmpseq[1:]
            record.seq = tmpseq
            # Skip records that became empty after trimming.
            if len(record.seq) > 0:
                SeqIO.write(record, sys.stdout, "fasta")
        else:
            # Non-protein target: emit the RNA transcript instead.
            record.seq = record.seq.transcribe()
            SeqIO.write(record, sys.stdout, "fasta")
if __name__ == "__main__":
    # Command-line entry point: collect arguments and run the translation.
    arg_parser = argparse.ArgumentParser(description="Translate fasta file")
    arg_parser.add_argument("fasta_file", type=argparse.FileType("r"), help="Fasta file")
    arg_parser.add_argument("--target", choices=["protein", "rna"])
    arg_parser.add_argument(
        "--table",
        type=int,
        default=11,
        choices=range(1, 23),
        help="Translation table to use",
    )
    arg_parser.add_argument(
        "--strip_stops", action="store_true", help="Remove stop characters"
    )
    arg_parser.add_argument(
        "--met", action="store_true", help="Convert first residue to Met"
    )
    # Forward parsed options directly as keyword arguments.
    translate(**vars(arg_parser.parse_args()))
| TAMU-CPT/galaxy-tools | tools/fasta/fasta_translate.py | Python | gpl-3.0 | 2,495 | [
"Biopython"
] | 5a8e73d43002f9384222b5ea5cb23acbeb2a93616a7d525353d9fa317ec71486 |
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- [+]Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- [+]Compute Ricatti-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative density function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
btdtrib -- btdtria(a, p, x)
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
gdtr -- Gamma distribution cumulative density function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
stdtr -- Student t distribution cumulative density function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to kolmogorov
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
owens_t -- Owen's T Function.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out])
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
lpn -- [+]Legendre function of the first kind.
lqn -- [+]Legendre function of the second kind.
lpmn -- [+]Sequence of associated Legendre functions of the first kind.
lqmn -- [+]Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial.
chebyt -- [+]Chebyshev polynomial of the first kind.
chebyu -- [+]Chebyshev polynomial of the second kind.
chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- [+]Jacobi polynomial.
laguerre -- [+]Laguerre polynomial.
genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
hermite -- [+]Physicist's Hermite polynomial.
hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
sh_legendre -- [+]Shifted Legendre polynomial.
sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- [+]Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate
hyp1f2 -- Hypergeometric function 1F2 and error estimate
hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
   mathieu_odd_coef -- [+]Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
   kei -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]The number of combinations of N things taken k at a time.
perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- [+]Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import

from .sf_error import SpecialFunctionWarning, SpecialFunctionError

from ._ufuncs import *

from .basic import *
from ._logsumexp import logsumexp
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
                                spherical_kn)

# Public API: every name imported above that is not private.
__all__ = [s for s in dir() if not s.startswith('_')]

from numpy.dual import register_func
# Register scipy's i0 as the implementation used by numpy.dual.
register_func('i0',i0)
del register_func

from scipy._lib._testutils import PytestTester
# Expose `scipy.special.test()` for running this package's test suite.
test = PytestTester(__name__)
del PytestTester
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/special/__init__.py | Python | gpl-3.0 | 27,475 | [
"Gaussian"
] | f1aaad7dc887dd88a14850633fff595bf5b56738645381afd683be596cb59699 |
import numpy as np
import matplotlib.pyplot as plt
import emcee
import batman
def _gaus(x, a, b, x0, sigma):
"""
Simple Gaussian function
Parameters
----------
x : float or 1-d numpy array
The data to evaluate the Gaussian over
a : float
the amplitude
b : float
the constant offset
x0 : float
the center of the Gaussian
sigma : float
the width of the Gaussian
Returns
-------
Array or float of same type as input (x).
"""
return a * np.exp(-(x - x0)**2 / (2 * sigma**2)) + b
def RemoveTransit(data):
    '''
    Use M-A model parameters, subtract from light curve

    Parameters
    ----------
    data : light-curve data for a transit (format defined by the caller)

    Raises
    ------
    NotImplementedError
        This function is an unimplemented stub.  The original body returned
        the undefined name ``flatdata`` and would always raise ``NameError``;
        raising explicitly makes the missing implementation obvious.
    '''
    raise NotImplementedError("RemoveTransit is not implemented yet")
def FitBumps(data, nspots=4):
    '''
    Use emcee to fit data with N spots in given transit

    Parameters
    ----------
    data : light-curve data for a single transit (format defined by caller)
    nspots : int, optional
        Number of spots to fit (default 4).

    Raises
    ------
    NotImplementedError
        This function is an unimplemented stub.  The original body returned
        the undefined name ``bestfitmodel`` and would always raise
        ``NameError``; raising explicitly makes the missing work obvious.
    '''
    raise NotImplementedError("FitBumps is not implemented yet")
def Datum2Lon(t):
    '''
    Given parameters of system:
    Prot, Porb, ephem, etc
    Convert a given time (or array of times) *in transit* to star surface coordinates (lon, lat)

    Raises
    ------
    NotImplementedError
        This function is an unimplemented stub.  The original body returned
        the undefined names ``lon`` and ``lat`` and would always raise
        ``NameError``; raising explicitly makes the missing work obvious.
    '''
    raise NotImplementedError("Datum2Lon is not implemented yet")
| jradavenport/ThirdWay | thirdway/thirdway.py | Python | mit | 1,017 | [
"Gaussian"
] | 1fa4a53b1bd98b2d08c03bb1c86adcf1d6171a037db1581339e0d1414129f3e7 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This provides useful general math tools.
Functions:
fcmp Compare two floating point numbers, up to a specified precision.
intd Represent a floating point number as an integer.
safe_log log, but returns an arbitrarily small number for log(0).
safe_exp exp, but returns a large or small number instead of overflows.
"""
import math
def fcmp(x, y, precision):
    """fcmp(x, y, precision) -> -1, 0, or 1"""
    # Values closer than `precision` compare as equal.
    if math.fabs(x - y) < precision:
        return 0
    return -1 if x < y else 1
def intd(x, digits_after_decimal=0):
    """intd(x[, digits_after_decimal]) -> int x, rounded

    Represent a floating point number with some digits after the
    decimal point as an integer.  This is useful when floating point
    comparisons are failing due to precision problems.  e.g.
    intd(5.35, 1) -> 54.
    """
    scale = 10. ** digits_after_decimal
    # Round half away from zero by shifting toward the sign before truncating.
    offset = 0.5 if x >= 0 else -0.5
    return int(x * scale + offset)
def safe_log(n, zero=None, neg=None):
    """safe_log(n, zero=None, neg=None) -> log(n)

    Calculate the log of n.  If n is 0, returns the value of zero.  If n is
    negative, returns the value of neg.
    """
    # Guard clauses for the domain edges of log().
    if n < 0:
        return neg
    if n < 1E-100:
        return zero
    return math.log(n)

LOG2 = math.log(2)

def safe_log2(n, zero=None, neg=None):
    """safe_log2(n, zero=None, neg=None) -> log(n)

    Calculate the log base 2 of n.  If n is 0, returns the value of
    zero.  If n is negative, returns the value of neg.
    """
    # Change of base from natural log; propagate a None sentinel untouched.
    result = safe_log(n, zero=zero, neg=neg)
    return result if result is None else result / LOG2
def safe_exp(n, under=None, over=None):
    """safe_exp(n, under=None, over=None) -> e**n

    Guaranteed not to overflow.  Instead of overflowing, it returns
    the values of 'under' for underflows or 'over' for overflows.
    """
    try:
        return math.exp(n)
    except OverflowError:
        # Keep the sign check so 'under' is honoured should an underflow
        # ever raise (CPython's math.exp underflows silently to 0.0).
        if n < 0:
            return under
        return over
    # NOTE: the original ended with `raise "How did I get here?"` after the
    # try/except.  That line was unreachable, and raising a str is itself a
    # TypeError in Python 3, so it has been removed.
# Try and load C implementations of functions. If I can't,
# then just ignore and use the pure python implementations.
try:
    # The star-import deliberately shadows the pure-Python definitions above
    # with same-named C versions when the extension module is available.
    from cmathfns import *
except ImportError:
    pass
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/mathfns.py | Python | apache-2.0 | 2,452 | [
"Biopython"
] | 51ce17408ea06e5c3afe6397219926dc8a1c6ef9ad2523f1aa8686eb8b26f04a |
######################################################################
#
# Functions to calculate integration points and weights for Gaussian
# quadrature
#
# x,w = gaussxw(N) returns integration points x and integration
# weights w such that sum_i w[i]*f(x[i]) is the Nth-order
# Gaussian approximation to the integral int_{-1}^1 f(x) dx
# x,w = gaussxwab(N,a,b) returns integration points and weights
# mapped to the interval [a,b], so that sum_i w[i]*f(x[i])
# is the Nth-order Gaussian approximation to the integral
# int_a^b f(x) dx
#
# This code finds the zeros of the nth Legendre polynomial using
# Newton's method, starting from the approximation given in Abramowitz
# and Stegun 22.16.6. The Legendre polynomial itself is evaluated
# using the recurrence relation given in Abramowitz and Stegun
# 22.7.10. The function has been checked against other sources for
# values of N up to 1000. It is compatible with version 2 and version
# 3 of Python.
#
# Written by Mark Newman <mejn@umich.edu>, June 4, 2011
# You may use, share, or modify this file freely
#
######################################################################
from numpy import ones,copy,cos,tan,pi,linspace
def gaussxw(N):
    """Return points and weights for Nth-order Gauss-Legendre quadrature
    on [-1, 1], so that sum_i w[i]*f(x[i]) approximates int_{-1}^1 f(x) dx.

    Roots of the Nth Legendre polynomial are found with Newton's method,
    starting from the Abramowitz & Stegun 22.16.6 approximation; the
    polynomial is evaluated via the 22.7.10 recurrence.
    """
    # Initial approximation to the roots (A&S 22.16.6)
    theta = linspace(3, 4*N-1, N)/(4*N+2)
    roots = cos(pi*theta+1/(8*N*N*tan(theta)))

    # Polish the roots with Newton iterations until machine precision
    tolerance = 1e-15
    shift = 1.0
    while shift > tolerance:
        prev = ones(N, float)
        poly = copy(roots)
        # Legendre recurrence (A&S 22.7.10) up to order N
        for k in range(1, N):
            prev, poly = poly, ((2*k+1)*roots*poly-k*prev)/(k+1)
        deriv = (N+1)*(prev-roots*poly)/(1-roots*roots)
        step = poly/deriv
        roots -= step
        shift = max(abs(step))

    # Weights from the derivative at the converged roots
    weights = 2*(N+1)*(N+1)/(N*N*(1-roots*roots)*deriv*deriv)
    return roots, weights
def gaussxwab(N, a, b):
    """Return Nth-order Gaussian quadrature points and weights mapped from
    [-1, 1] onto [a, b], so sum_i w[i]*f(x[i]) approximates int_a^b f(x) dx."""
    pts, wts = gaussxw(N)
    half_width = 0.5*(b-a)
    midpoint = 0.5*(b+a)
    return half_width*pts+midpoint, half_width*wts
| KiMiralles/Python-Learning | Computational Physics Newman/Book Resources/gaussxw.py | Python | gpl-3.0 | 1,896 | [
"Gaussian"
] | 9b3a2fef5f94dbaf193ce52ef65f1474d3d2dd1858d4e5747b44ccca497860be |
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstrate categorical Markov chain with hidden Markov model (HMM)
"""
import numpy as np
import matplotlib.pyplot as plt
from bayespy.nodes import Gaussian, \
CategoricalMarkovChain, \
Dirichlet, \
Mixture, \
Categorical
from bayespy.inference.vmp.vmp import VB
import bayespy.plot as bpplt
def hidden_markov_model(distribution, *args, K=3, N=100):
    """Construct a VB inference engine for a hidden Markov model.

    Parameters
    ----------
    distribution : node class
        Emission (observation) distribution, e.g. Gaussian
    *args :
        Parameters of the emission distribution
    K : int
        Number of hidden states
    N : int
        Length of the observed sequence
    """
    # Sparse Dirichlet prior over initial-state probabilities
    initial_probs = Dirichlet(1e-3*np.ones(K),
                              name='alpha')

    # One sparse Dirichlet prior per row of the transition matrix
    transition_probs = Dirichlet(1e-3*np.ones(K),
                                 plates=(K,),
                                 name='A')

    # Latent state chain (initial and transition probabilities unknown)
    hidden = CategoricalMarkovChain(initial_probs, transition_probs,
                                    states=N,
                                    name='Z')

    # Observations emitted by the state-dependent distribution
    emissions = Mixture(hidden, distribution, *args,
                        name='Y')

    return VB(emissions, hidden, initial_probs, transition_probs)
def mixture_model(distribution, *args, K=3, N=100):
    """Construct a VB inference engine for a plain mixture model (no
    temporal structure); used as a baseline against the HMM."""
    # Sparse Dirichlet prior over the mixture weights
    weights = Dirichlet(1e-3*np.ones(K),
                        name='alpha')

    # Independent cluster assignment for each of the N observations
    assignments = Categorical(weights,
                              plates=(N,),
                              name='Z')

    # Observation distribution selected by the assignment
    emissions = Mixture(assignments, distribution, *args,
                        name='Y')

    return VB(emissions, assignments, weights)
@bpplt.interactive
def run(N=200, maxiter=10, seed=42, std=2.0, plot=True):
    """Simulate a noisy 3-state 2-D trajectory, then fit and plot both an
    HMM and a plain mixture model for comparison.

    Parameters
    ----------
    N : int
        Number of observations to simulate
    maxiter : int
        Number of VB update iterations
    seed : int or None
        Seed for numpy's RNG; None leaves the RNG state untouched
    std : float
        Standard deviation of the isotropic emission noise
    plot : bool
        NOTE(review): currently unused -- plots are always drawn; confirm
        whether this was meant to gate the plotting calls.
    """
    # Use deterministic random numbers
    if seed is not None:
        np.random.seed(seed)

    #
    # Generate data
    #
    mu = np.array([ [0,0], [3,4], [6,0] ])  # true emission means, one per state
    K = 3
    p0 = np.ones(K) / K  # uniform initial-state distribution
    q = 0.9 # probability to stay in the same state
    r = (1-q)/(K-1)
    # Transition matrix: q on the diagonal, the remaining mass spread evenly
    P = q*np.identity(K) + r*(np.ones((3,3))-np.identity(3))
    y = np.zeros((N,2))
    z = np.zeros(N)
    state = np.random.choice(K, p=p0)
    for n in range(N):
        z[n] = state
        y[n,:] = std*np.random.randn(2) + mu[state]
        state = np.random.choice(K, p=P[state])

    plt.figure()
    # Plot data
    plt.subplot(1,3,1)
    plt.axis('equal')
    plt.title('True classification')
    # One pure RGB color per true state
    colors = [ [[1,0,0], [0,1,0], [0,0,1]][int(state)] for state in z ]
    plt.plot(y[:,0], y[:,1], 'k-', zorder=-10)
    plt.scatter(y[:,0], y[:,1], c=colors, s=40)

    #
    # Use HMM
    #
    # Run VB inference for HMM
    Q_hmm = hidden_markov_model(Gaussian, mu, K*[std**(-2)*np.identity(2)],
                                K=K, N=N)
    Q_hmm['Y'].observe(y)
    Q_hmm.update(repeat=maxiter)

    # Plot results
    plt.subplot(1,3,2)
    plt.axis('equal')
    plt.title('Classification with HMM')
    # NOTE(review): reaches into bayespy's private message-passing API to
    # obtain posterior state probabilities, used directly as RGB colors
    colors = Q_hmm['Y'].parents[0]._message_to_child()[0]
    plt.plot(y[:,0], y[:,1], 'k-', zorder=-10)
    plt.scatter(y[:,0], y[:,1], c=colors, s=40)

    #
    # Use mixture model
    #
    # For comparison, run VB for Gaussian mixture
    Q_mix = mixture_model(Gaussian, mu, K*[std**(-2)*np.identity(2)],
                          K=K, N=N)
    Q_mix['Y'].observe(y)
    Q_mix.update(repeat=maxiter)

    # Plot results
    plt.subplot(1,3,3)
    plt.axis('equal')
    plt.title('Classification with mixture')
    colors = Q_mix['Y'].parents[0]._message_to_child()[0]
    plt.plot(y[:,0], y[:,1], 'k-', zorder=-10)
    plt.scatter(y[:,0], y[:,1], c=colors, s=40)
if __name__ == '__main__':
    import sys, getopt, os

    # Parse demo options from the command line.
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "",
                                   ["n=",
                                    "seed=",
                                    "std=",
                                    "maxiter="])
    except getopt.GetoptError:
        # Fix: the usage line previously said 'demo_lssm.py' (copy-pasted
        # from the LSSM demo); this file is the HMM demo.
        print('python hmm.py <options>')
        print('--n=<INT>        Number of data vectors')
        print('--std=<FLT>      Standard deviation of the Gaussians')
        print('--maxiter=<INT>  Maximum number of VB iterations')
        print('--seed=<INT>     Seed (integer) for the random number generator')
        sys.exit(2)

    # Translate options into run() keyword arguments.
    kwargs = {}
    for opt, arg in opts:
        if opt == "--maxiter":
            kwargs["maxiter"] = int(arg)
        elif opt == "--std":
            kwargs["std"] = float(arg)
        elif opt == "--seed":
            kwargs["seed"] = int(arg)
        elif opt in ("--n",):
            kwargs["N"] = int(arg)
        else:
            raise ValueError("Unhandled option given")

    run(**kwargs)
    plt.show()
| SalemAmeen/bayespy | bayespy/demos/hmm.py | Python | mit | 4,839 | [
"Gaussian"
] | e37dca3820568745f853fef5adce5decb7bb12d195701f3ec2bb240c13f5d283 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
translate.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingOutputRasterLayer)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class gdaladdo(GdalAlgorithm):
    """Processing algorithm wrapping the 'gdaladdo' command-line utility,
    which builds overview images (pyramids) for a raster layer."""

    INPUT = 'INPUT'
    LEVELS = 'LEVELS'
    CLEAN = 'CLEAN'
    RESAMPLING = 'RESAMPLING'
    FORMAT = 'FORMAT'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare parameters and outputs."""
        # (display label, gdaladdo '-r' argument) pairs
        self.methods = ((self.tr('Nearest Neighbour (default)'), 'nearest'),
                        (self.tr('Average'), 'average'),
                        (self.tr('Gaussian'), 'gauss'),
                        (self.tr('Cubic Convolution'), 'cubic'),
                        (self.tr('B-Spline Convolution'), 'cubicspline'),
                        (self.tr('Lanczos Windowed Sinc'), 'lanczos'),
                        (self.tr('Average MP'), 'average_mp'),
                        (self.tr('Average in Mag/Phase Space'), 'average_magphase'),
                        (self.tr('Mode'), 'mode'))

        # Index into this tuple is the FORMAT enum value used below
        self.formats = (self.tr('Internal (if possible)'),
                        self.tr('External (GTiff .ovr)'),
                        self.tr('External (ERDAS Imagine .aux)'))

        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterBoolean(self.CLEAN,
                                                        self.tr('Remove all existing overviews'),
                                                        defaultValue=False))

        # GDAL < 2.3 requires explicit overview levels; newer GDAL can
        # compute them itself, so the parameter becomes optional (advanced).
        if GdalUtils.version() < 230000:
            self.addParameter(QgsProcessingParameterString(self.LEVELS,
                                                           self.tr('Overview levels'),
                                                           defaultValue='2 4 8 16'))

        params = []
        if GdalUtils.version() >= 230000:
            params.append(QgsProcessingParameterString(self.LEVELS,
                                                       self.tr('Overview levels (e.g. 2 4 8 16)'),
                                                       defaultValue=None,
                                                       optional=True))
        params.append(QgsProcessingParameterEnum(self.RESAMPLING,
                                                 self.tr('Resampling method'),
                                                 options=[i[0] for i in self.methods],
                                                 allowMultiple=False,
                                                 defaultValue=None,
                                                 optional=True))
        params.append(QgsProcessingParameterEnum(self.FORMAT,
                                                 self.tr('Overviews format'),
                                                 options=self.formats,
                                                 allowMultiple=False,
                                                 defaultValue=0,
                                                 optional=True))
        params.append(QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True))
        for p in params:
            p.setFlags(p.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.addParameter(p)

        self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT, self.tr('Pyramidized')))

    def name(self):
        return 'overviews'

    def displayName(self):
        return self.tr('Build overviews (pyramids)')

    def group(self):
        return self.tr('Raster miscellaneous')

    def groupId(self):
        return 'rastermiscellaneous'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-overview.png'))

    def commandName(self):
        return 'gdaladdo'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Assemble the gdaladdo command line from the parameter values."""
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if inLayer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))

        fileName = inLayer.source()

        arguments = [fileName]
        if self.RESAMPLING in parameters and parameters[self.RESAMPLING] is not None:
            arguments.append('-r')
            arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1])

        ovrFormat = self.parameterAsEnum(parameters, self.FORMAT, context)
        if ovrFormat == 1:
            # external overviews in a GTiff .ovr file
            arguments.append('-ro')
        elif ovrFormat == 2:
            # external overviews in ERDAS Imagine .aux files
            arguments.extend('--config USE_RRD YES'.split(' '))

        if self.parameterAsBoolean(parameters, self.CLEAN, context):
            arguments.append('-clean')

        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)

        # Fix: with GDAL >= 2.3 the levels are optional.  Previously an empty
        # value still produced a spurious empty argument, because
        # ''.split(' ') == [''].  Only append levels when some were given.
        levels = self.parameterAsString(parameters, self.LEVELS, context)
        if levels:
            arguments.extend(levels.split(' '))

        self.setOutputValue(self.OUTPUT, fileName)

        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
| pblottiere/QGIS | python/plugins/processing/algs/gdal/gdaladdo.py | Python | gpl-2.0 | 6,874 | [
"Gaussian"
] | 5f3ec163a1953ca80418be20363f6ec64f7db0358b1b320b432cdf57e2357d96 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Values accepted as booleans during module argument coercion.
BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE

# Binary (powers-of-1024) size-suffix multipliers used by
# bytes_to_human() and human_to_bytes().
SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}

# Map of chattr/lsattr single-letter file-attribute flags to human-readable
# names; used by format_attributes() and get_flags_from_attributes().
FILE_ATTRIBUTES = {
    'A': 'noatime',
    'a': 'append',
    'c': 'compressed',
    'C': 'nocow',
    'd': 'nodump',
    'D': 'dirsync',
    'e': 'extents',
    'E': 'encrypted',
    'h': 'blocksize',
    'i': 'immutable',
    'I': 'indexed',
    'j': 'journalled',
    'N': 'inline',
    's': 'zero',
    'S': 'synchronous',
    't': 'notail',
    'T': 'blockroot',
    'u': 'undelete',
    'X': 'compressedraw',
    'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
# Matches argument names that look like they carry a password/passphrase
# (used to decide which values must never be logged).
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)

_NUMBERTYPES = tuple(list(integer_types) + [float])

# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES

imap = map

try:
    # Python 2
    unicode
except NameError:
    # Python 3
    unicode = text_type

try:
    # Python 2.6+
    bytes
except NameError:
    # Python 2.4
    bytes = binary_type

try:
    # Python 2
    basestring
except NameError:
    # Python 3
    basestring = string_types

_literal_eval = literal_eval

# End of deprecated names

# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None

# Arguments accepted by every file-manipulating module; merged into a
# module's argument_spec when AnsibleModule(add_file_common_args=True).
FILE_COMMON_ARGUMENTS = dict(
    src=dict(),
    mode=dict(type='raw'),
    owner=dict(),
    group=dict(),
    seuser=dict(),
    serole=dict(),
    selevel=dict(),
    setype=dict(),
    follow=dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content=dict(no_log=True),
    backup=dict(),
    force=dict(),
    remote_src=dict(), # used by assemble
    regexp=dict(), # used by assemble
    delimiter=dict(), # used by assemble
    directory_mode=dict(), # used by copy
    unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
    attributes=dict(aliases=['attr']),
)

# Matches command-line flags such as --password / -passwd (for scrubbing).
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')

# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8)  # file mode permission bits
EXEC_PERM_BITS = int('00111', 8)  # execute permission bits
DEFAULT_PERM = int('0666', 8)  # default file permission bits
def get_platform():
    """Return the platform (OS) name, e.g. 'Linux' or 'Darwin'."""
    system_name = platform.system()
    return system_name
def get_distribution():
    ''' return the distribution name

    Returns a capitalized Linux distribution name ('Amazon' or
    'OtherLinux' for /etc/system-release based systems), or None on
    non-Linux platforms.
    '''
    # NOTE(review): platform.linux_distribution(), platform._supported_dists
    # and platform.dist() were all removed in Python 3.8 -- on modern
    # interpreters this function raises; confirm target Python versions.
    if platform.system() == 'Linux':
        try:
            supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            distribution = platform.dist()[0].capitalize()
    else:
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version

    Returns the Linux distribution version string, or None on non-Linux
    platforms.
    '''
    # NOTE(review): platform.linux_distribution() / platform.dist() were
    # removed in Python 3.8; see get_distribution() above.
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''Return every (direct and indirect) subclass of *cls*.

    ``cls.__subclasses__()`` only returns direct subclasses; this walks the
    whole class tree.  Used by modules like Hardware or Network fact classes
    to locate platform-specific implementations.
    '''
    # Fix: the previous implementation removed items from the list it was
    # iterating over, which skips elements within a pass and could append
    # duplicates on the next pass.  A breadth-first walk with an explicit
    # queue visits every class exactly once.
    subclasses = []
    to_visit = deque([cls])
    while to_visit:
        parent = to_visit.popleft()
        for sub in parent.__subclasses__():
            if sub not in subclasses:
                subclasses.append(sub)
                to_visit.append(sub)
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform.  See User
    module for an example.

    Selection order: (1) subclass matching both platform and distribution,
    (2) subclass matching platform with no distribution requirement,
    (3) the base class itself.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None

    # get the most specific superclass for this platform
    # NOTE: no 'break' in either loop -- when several subclasses match,
    # the one returned last by get_all_subclasses() wins.
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    if subclass is None:
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        subclass = cls

    # Allocate an instance of the chosen subclass; its __init__ will run
    # with *args/**kwargs via the normal construction protocol.
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to byte str

        Specialized for json return values: only the container types the
        json module produces (dict, list, tuple) are descended into.
    '''
    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict((json_dict_unicode_to_bytes(key, encoding, errors),
                     json_dict_unicode_to_bytes(val, encoding, errors))
                    for key, val in iteritems(d))
    if isinstance(d, list):
        return [json_dict_unicode_to_bytes(item, encoding, errors) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_unicode_to_bytes(item, encoding, errors) for item in d)
    # Scalars other than text pass through unchanged
    return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text

        Specialized for json return values: only the container types the
        json module produces (dict, list, tuple) are descended into.
    '''
    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict((json_dict_bytes_to_unicode(key, encoding, errors),
                     json_dict_bytes_to_unicode(val, encoding, errors))
                    for key, val in iteritems(d))
    if isinstance(d, list):
        return [json_dict_bytes_to_unicode(item, encoding, errors) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_bytes_to_unicode(item, encoding, errors) for item in d)
    # Scalars other than bytes pass through unchanged
    return d
def return_values(obj):
    """ Yield native stringified leaf values from datastructures.

    For use with removing sensitive values pre-jsonification."""
    if isinstance(obj, (text_type, binary_type)):
        # Empty strings carry no sensitive data; skip them
        if obj:
            yield to_native(obj, errors='surrogate_or_strict')
    elif isinstance(obj, SEQUENCETYPE):
        for item in obj:
            for leaf in return_values(item):
                yield leaf
    elif isinstance(obj, Mapping):
        # Only values can be sensitive; keys are argument names
        for pair in obj.items():
            for leaf in return_values(pair[1]):
                yield leaf
    elif isinstance(obj, (bool, NoneType)):
        # bool is a subclass of int, so this must precede the number branch
        pass
    elif isinstance(obj, NUMBERTYPES):
        yield to_native(obj, nonstring='simplerepr')
    else:
        raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
    """
    Helper function for :meth:`remove_values`.

    :arg value: The value to check for strings that need to be stripped
    :arg no_log_strings: set of strings which must be stripped out of any values
    :arg deferred_removals: List which holds information about nested
        containers that have to be iterated for removals.  It is passed into
        this function so that more entries can be added to it if value is
        a container type.  The format of each entry is a 2-tuple where the first
        element is the ``value`` parameter and the second value is a new
        container to copy the elements of ``value`` into once iterated.
    :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
        1. :class:`~datetime.datetime` objects which are changed into a string representation.
        2. objects which are in no_log_strings are replaced with a placeholder
            so that no sensitive data is leaked.
        If ``value`` is a container type, returns a new empty container.

    ``deferred_removals`` is added to as a side-effect of this function.

    .. warning:: It is up to the caller to make sure the order in which value
        is passed in is correct.  For instance, higher level containers need
        to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
        dictionary for ``level1``, then the dict for ``level2``, and finally
        the list for ``level3``.
    """
    if isinstance(value, (text_type, binary_type)):
        # Strings: censor in native-str form, then convert back to the
        # original text/bytes type.
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, errors='surrogate_or_strict')

        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)

        # Restore the caller's string type if the conversions above changed it
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value

    elif isinstance(value, Sequence):
        # Containers: hand back a new (empty, mutable) container and defer
        # element processing to the caller's work queue.
        if isinstance(value, MutableSequence):
            new_value = type(value)()
        else:
            new_value = []  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value

    elif isinstance(value, Set):
        if isinstance(value, MutableSet):
            new_value = type(value)()
        else:
            new_value = set()  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value

    elif isinstance(value, Mapping):
        if isinstance(value, MutableMapping):
            new_value = type(value)()
        else:
            new_value = {}  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value

    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # Numbers/bools/None: compare their string form against the
        # censored values (a numeric password would otherwise leak).
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'

    elif isinstance(value, datetime.datetime):
        # Normalize datetimes to their ISO-8601 string representation
        value = value.isoformat()

    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))

    return value
def remove_values(value, no_log_strings):
    """ Remove strings in no_log_strings from value.  If value is a container
    type, then remove a lot more"""
    # Work queue of (source container, censored target container) pairs;
    # _remove_values_conditions appends to it as nesting is discovered.
    work_queue = deque()

    no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
    censored = _remove_values_conditions(value, no_log_strings, work_queue)

    while work_queue:
        source, target = work_queue.popleft()
        if isinstance(target, Mapping):
            for key, elem in source.items():
                target[key] = _remove_values_conditions(elem, no_log_strings, work_queue)
        else:
            for elem in source:
                cleaned = _remove_values_conditions(elem, no_log_strings, work_queue)
                if isinstance(target, MutableSequence):
                    target.append(cleaned)
                elif isinstance(target, MutableSet):
                    target.add(cleaned)
                else:
                    raise TypeError('Unknown container type encountered when removing private values from output')

    return censored
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages

    Scans right-to-left for '@' characters and masks anything that looks
    like the password part of ``user:pass@host`` or
    ``scheme://user:pass@host`` with asterisks.  Values in *no_log_values*
    are additionally scrubbed from the result.
    '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #   a passwd
    # sep_search_end: where in the string to end a search for the sep
    data = to_native(data)

    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1  # truthy sentinel so the loop is entered at least once
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break

        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here.  Return the remaining data
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin

    output = ''.join(output)
    if no_log_values:
        output = remove_values(output, no_log_values)
    return output
def bytes_to_human(size, isbits=False, unit=None):
    """Render a byte (or bit, when isbits=True) count as a human-readable
    string, e.g. 1024 -> '1.00 KB'.

    When *unit* is given, force that suffix instead of auto-selecting the
    largest fitting one."""
    base = 'bits' if isbits else 'Bytes'
    suffix = ''
    # Walk the suffixes from largest to smallest; stop at the first one
    # whose multiplier fits (or at the explicitly requested unit).
    for suffix, limit in sorted(SIZE_RANGES.items(), key=lambda kv: kv[1], reverse=True):
        if (unit is None and size >= limit) or (unit is not None and unit.upper() == suffix[0]):
            break

    if limit != 1:
        suffix += base[0]
    else:
        suffix = base

    return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
    ex:
        human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    When isbits is True the suffix is interpreted as bits ('Kb') rather
    than bytes ('KB').

    Raises ValueError when the number cannot be parsed or the unit suffix
    is not one of SIZE_RANGES.
    '''
    # Fix: raw string for the regex ('\s' in a non-raw string is a
    # deprecated invalid escape on modern Python).
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except ValueError:
        # Fix: was a bare 'except:'; only float() can fail here.
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))

    unit = m.group(2)
    if unit is None:
        unit = default_unit

    if unit is None:
        ''' No unit given, returning raw number '''
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except KeyError:
        # Fix: was a bare 'except:'; only the dict lookup can fail here.
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))

    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))

    return int(round(num * limit))
def is_executable(path):
    '''is the given path executable?

    Limitations:
      * Does not account for FSACLs.
      * Most times we really want to know "Can the current user execute this
        file".  This function does not tell us that, only whether some
        execute bit is set.
    '''
    # Union of the user/group/other execute bits, bitwise-anded with the
    # file's mode: a non-zero (truthy) result means at least one is set.
    any_exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return os.stat(path).st_mode & any_exec_bits
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module.  Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously.  It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        # A previous AnsibleModule already consumed stdin/argv; reuse it.
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline

        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if PY3:
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                buffer = sys.stdin.buffer.read()
        # Cache the raw bytes so later AnsibleModule instances don't try to
        # read stdin a second time.
        _ANSIBLE_ARGS = buffer

    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin.  Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

    if PY2:
        params = json_dict_unicode_to_bytes(params)

    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin.  Unable to figure out what parameters were passed", '
              '"failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    ''' Load value from environment

    Returns the value of the first environment variable in *args* that is
    set; raises AnsibleFallbackNotFound when none of them are.
    '''
    for var_name in args:
        try:
            return os.environ[var_name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
    """Lowercase elements of a list.

    Elements without a .lower() method (non-strings) pass through untouched.
    """
    return [item.lower() if hasattr(item, 'lower') else item for item in lst]
def format_attributes(attributes):
    """Translate chattr-style flag letters into their human-readable names.

    Unknown flag letters are silently skipped.
    """
    return [FILE_ATTRIBUTES[flag] for flag in attributes if flag in FILE_ATTRIBUTES]
def get_flags_from_attributes(attributes):
    """Collapse the FILE_ATTRIBUTES flags whose descriptions appear in *attributes* into one string."""
    return ''.join(flag for flag, description in FILE_ATTRIBUTES.items() if description in attributes)
class AnsibleFallbackNotFound(Exception):
    """Raised by fallback strategies (e.g. env_fallback) when no value is found;
    AnsibleModule._set_fallbacks() catches it and leaves the parameter unset."""
    pass
class _SetEncoder(json.JSONEncoder):
    """JSON encoder that serializes set-like objects as lists."""
    def default(self, obj):
        # Sets are not natively JSON-serializable; emit them as lists and
        # defer everything else to the base encoder (which raises TypeError).
        if isinstance(obj, Set):
            return list(obj)
        return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/* for examples

        NOTE: the order of operations below matters: params must be loaded
        before aliases/no_log processing, and no_log must be processed before
        anything may call fail_json (which would log).
        '''
        self._name = os.path.basename(__file__)  # initialize name until we can parse from options
        self.argument_spec = argument_spec
        self.supports_check_mode = supports_check_mode
        self.check_mode = False
        self.no_log = no_log
        self.cleanup_files = []
        self._debug = False
        self._diff = False
        self._socket_path = None
        self._verbosity = 0
        # May be used to set modifications to the environment for any
        # run_command invocation
        self.run_command_environ_update = {}
        self._warnings = []
        self._deprecations = []
        self.aliases = {}
        # internal '_ansible_*' control parameters are always accepted
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
                              '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
                              '_ansible_socket']
        if add_file_common_args:
            # merge in the shared file-module options (path, mode, owner, ...)
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in self.argument_spec:
                    self.argument_spec[k] = v
        self._load_params()
        self._set_fallbacks()
        # append to legal_inputs and then possibly check against them
        try:
            self.aliases = self._handle_aliases()
        except Exception:
            e = get_exception()
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            sys.exit(1)
        # Save parameter values that should never be logged
        self.no_log_values = set()
        # Use the argspec to determine which args are no_log
        for arg_name, arg_opts in self.argument_spec.items():
            if arg_opts.get('no_log', False):
                # Find the value for the no_log'd param
                no_log_object = self.params.get(arg_name, None)
                if no_log_object:
                    self.no_log_values.update(return_values(no_log_object))
            if arg_opts.get('removed_in_version') is not None and arg_name in self.params:
                self._deprecations.append({
                    'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
                    'version': arg_opts.get('removed_in_version')
                })
        # check the locale as set by the current environment, and reset to
        # a known valid (LANG=C) if it's an invalid/unavailable locale
        self._check_locale()
        self._check_arguments(check_invalid_arguments)
        # check exclusive early
        if not bypass_checks:
            self._check_mutually_exclusive(mutually_exclusive)
        self._set_defaults(pre=True)
        # dispatch table used by _check_argument_types for the 'type' key
        self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
            'str': self._check_type_str,
            'list': self._check_type_list,
            'dict': self._check_type_dict,
            'bool': self._check_type_bool,
            'int': self._check_type_int,
            'float': self._check_type_float,
            'path': self._check_type_path,
            'raw': self._check_type_raw,
            'jsonarg': self._check_type_jsonarg,
            'json': self._check_type_jsonarg,
            'bytes': self._check_type_bytes,
            'bits': self._check_type_bits,
        }
        if not bypass_checks:
            self._check_required_arguments()
            self._check_argument_types()
            self._check_argument_values()
            self._check_required_together(required_together)
            self._check_required_one_of(required_one_of)
            self._check_required_if(required_if)
        self._set_defaults(pre=False)
        if not self.no_log:
            self._log_invocation()
        # finally, make sure we're in a sane working dir
        self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
    def set_context_if_different(self, path, context, changed, diff=None):
        """Apply selinux context *context* to *path* when it differs from the
        current context.  Returns the updated *changed* flag; in check mode
        returns True as soon as a change would be made."""
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.
        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            # special filesystems (e.g. NFS) take the mount point's context wholesale
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    # None in the requested context means "keep the current field"
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        new_context[i] = cur_context[i]
        if cur_context != new_context:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context
            try:
                if self.check_mode:
                    return True
                rc = selinux.lsetfilecon(to_native(path),
                                         str(':'.join(new_context)))
            except OSError:
                e = get_exception()
                self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
path_stat = os.lstat(b_path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception:
e = get_exception()
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=str(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info orbeing invalid long number
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError:
e = get_exception()
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise e
except Exception:
e = get_exception()
self.fail_json(path=path, msg='chmod failed', details=str(e))
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except:
e = get_exception()
self.fail_json(path=path, msg='chattr failed', details=str(e))
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split(' ')[0:2]
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
    def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
        """Translate the *perms* letters of one symbolic clause into an octal
        value for the given user class ('u', 'g' or 'o'), relative to the
        existing permissions in *path_stat*."""
        prev_mode = stat.S_IMODE(path_stat.st_mode)
        is_directory = stat.S_ISDIR(path_stat.st_mode)
        has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
        # 'X' only grants execute on directories or files that already have
        # an execute bit set (standard chmod semantics)
        apply_X_permission = is_directory or has_x_permissions
        # Permission bits constants documented at:
        # http://docs.python.org/2/library/stat.html#stat.S_ISUID
        if apply_X_permission:
            X_perms = {
                'u': {'X': stat.S_IXUSR},
                'g': {'X': stat.S_IXGRP},
                'o': {'X': stat.S_IXOTH},
            }
        else:
            X_perms = {
                'u': {'X': 0},
                'g': {'X': 0},
                'o': {'X': 0},
            }
        # Map each perm letter to its bit for the target user class; the
        # 'u'/'g'/'o' entries copy another class's current bits, shifted
        # into this class's position.
        user_perms_to_modes = {
            'u': {
                'r': stat.S_IRUSR,
                'w': stat.S_IWUSR,
                'x': stat.S_IXUSR,
                's': stat.S_ISUID,
                't': 0,
                'u': prev_mode & stat.S_IRWXU,
                'g': (prev_mode & stat.S_IRWXG) << 3,
                'o': (prev_mode & stat.S_IRWXO) << 6,
            },
            'g': {
                'r': stat.S_IRGRP,
                'w': stat.S_IWGRP,
                'x': stat.S_IXGRP,
                's': stat.S_ISGID,
                't': 0,
                'u': (prev_mode & stat.S_IRWXU) >> 3,
                'g': prev_mode & stat.S_IRWXG,
                'o': (prev_mode & stat.S_IRWXO) << 3,
            },
            'o': {
                'r': stat.S_IROTH,
                'w': stat.S_IWOTH,
                'x': stat.S_IXOTH,
                's': 0,
                't': stat.S_ISVTX,
                'u': (prev_mode & stat.S_IRWXU) >> 6,
                'g': (prev_mode & stat.S_IRWXG) >> 3,
                'o': prev_mode & stat.S_IRWXO,
            }
        }
        # Insert X_perms into user_perms_to_modes
        for key, value in X_perms.items():
            user_perms_to_modes[key].update(value)
        def or_reduce(mode, perm):
            return mode | user_perms_to_modes[user][perm]
        return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
    def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        """Alias of set_fs_attributes_if_different, kept for API compatibility."""
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        """Alias of set_fs_attributes_if_different, kept for API compatibility."""
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.
        '''
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if os.path.exists(b_path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            # resolve numeric ids to names, falling back to the numeric string
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            st = os.lstat(b_path)
            kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
            # secontext not yet supported
            if os.path.islink(b_path):
                kwargs['state'] = 'link'
            elif os.path.isdir(b_path):
                kwargs['state'] = 'directory'
            elif os.stat(b_path).st_nlink > 1:
                # more than one hard link to the inode
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        else:
            kwargs['state'] = 'absent'
        return kwargs
    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            # also export the fallback so child processes inherit it
            os.environ['LANG'] = 'C'
            os.environ['LC_ALL'] = 'C'
            os.environ['LC_MESSAGES'] = 'C'
        except Exception:
            e = get_exception()
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
    def _handle_aliases(self, spec=None):
        """Register every spec key and its aliases as legal inputs and copy
        alias values onto their canonical keys in self.params.

        Returns a dict mapping alias -> canonical name.
        """
        # this uses exceptions as it happens before we can safely call fail_json
        aliases_results = {}  # alias:canon
        if spec is None:
            spec = self.argument_spec
        for (k, v) in spec.items():
            self._legal_inputs.append(k)
            aliases = v.get('aliases', None)
            default = v.get('default', None)
            required = v.get('required', False)
            if default is not None and required:
                # not alias specific but this is a good place to check this
                raise Exception("internal error: required and default are mutually exclusive for %s" % k)
            if aliases is None:
                continue
            if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
                raise Exception('internal error: aliases must be a list or tuple')
            for alias in aliases:
                self._legal_inputs.append(alias)
                aliases_results[alias] = k
                if alias in self.params:
                    # the alias wins by being copied to the canonical key
                    self.params[k] = self.params[alias]
        return aliases_results
    def _check_arguments(self, check_invalid_arguments):
        """Consume internal '_ansible_*' control parameters (stripping them
        from self.params) and optionally fail on unknown parameters.

        Also exits early (skipped) when check mode is requested but this
        module does not support it.
        """
        self._syslog_facility = 'LOG_USER'
        unsupported_parameters = set()
        # list() because keys are deleted from self.params during iteration
        for (k, v) in list(self.params.items()):
            if k == '_ansible_check_mode' and v:
                self.check_mode = True
            elif k == '_ansible_no_log':
                self.no_log = self.boolean(v)
            elif k == '_ansible_debug':
                self._debug = self.boolean(v)
            elif k == '_ansible_diff':
                self._diff = self.boolean(v)
            elif k == '_ansible_verbosity':
                self._verbosity = v
            elif k == '_ansible_selinux_special_fs':
                self._selinux_special_fs = v
            elif k == '_ansible_syslog_facility':
                self._syslog_facility = v
            elif k == '_ansible_version':
                self.ansible_version = v
            elif k == '_ansible_module_name':
                self._name = v
            elif k == '_ansible_socket':
                self._socket_path = v
            elif check_invalid_arguments and k not in self._legal_inputs:
                unsupported_parameters.add(k)
            # clean up internal params:
            if k.startswith('_ansible_'):
                del self.params[k]
        if unsupported_parameters:
            self.fail_json(msg="Unsupported parameters for (%s) module: %s. Supported parameters include: %s" % (self._name,
                                                                                                                 ','.join(sorted(list(unsupported_parameters))),
                                                                                                                 ','.join(sorted(self.argument_spec.keys()))))
        if self.check_mode and not self.supports_check_mode:
            self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field]) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
    def _check_required_if(self, spec):
        ''' ensure that parameters which conditionally required are present '''
        if spec is None:
            return
        for sp in spec:
            missing = []
            max_missing_count = 0
            is_one_of = False
            # each entry is (key, value, requirements[, is_one_of])
            if len(sp) == 4:
                key, val, requirements, is_one_of = sp
            else:
                key, val, requirements = sp
            # is_one_of is True at least one requirement should be
            # present, else all requirements should be present.
            if is_one_of:
                # only fail when *every* requirement is missing
                max_missing_count = len(requirements)
            if key in self.params and self.params[key] == val:
                for check in requirements:
                    count = self._count_terms((check,))
                    if count == 0:
                        missing.append(check)
            if len(missing) and len(missing) >= max_missing_count:
                self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
    def _check_argument_values(self, spec=None, param=None):
        ''' ensure all arguments have the requested values, and there are no stray arguments '''
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params
        for (k, v) in spec.items():
            choices = v.get('choices', None)
            if choices is None:
                continue
            if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
                if k in param:
                    if param[k] not in choices:
                        # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
                        # the value. If we can't figure this out, module author is responsible.
                        # NOTE(review): lowered_choices is computed here but the
                        # intersection below is taken against the original
                        # 'choices' — looks intentional upstream, confirm.
                        lowered_choices = None
                        if param[k] == 'False':
                            lowered_choices = _lenient_lowercase(choices)
                            FALSEY = frozenset(BOOLEANS_FALSE)
                            overlap = FALSEY.intersection(choices)
                            if len(overlap) == 1:
                                # Extract from a set
                                (param[k],) = overlap
                        if param[k] == 'True':
                            if lowered_choices is None:
                                lowered_choices = _lenient_lowercase(choices)
                            TRUTHY = frozenset(BOOLEANS_TRUE)
                            overlap = TRUTHY.intersection(choices)
                            if len(overlap) == 1:
                                (param[k],) = overlap
                        if param[k] not in choices:
                            choices_str = ",".join([to_native(c) for c in choices])
                            msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
                            self.fail_json(msg=msg)
            else:
                self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a datavaluestructure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception:
e = get_exception()
if include_exceptions:
return (value, e)
return value
    def _check_type_str(self, value):
        # Coerce any non-string value to str; strings pass through unchanged.
        if isinstance(value, string_types):
            return value
        # Note: This could throw a unicode error if value's __str__() method
        # returns non-ascii. Have to port utils.to_bytes() if that happens
        return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
    def _check_type_dict(self, value):
        """Coerce *value* to a dict.

        Strings are parsed either as JSON (when they start with '{') or as
        'k1=v1 k2=v2' / 'k1=v1,k2=v2' pairs, honoring quoting and backslash
        escapes.
        """
        if isinstance(value, dict):
            return value
        if isinstance(value, string_types):
            if value.startswith("{"):
                try:
                    return json.loads(value)
                except:
                    # fall back to evaluating as a Python dict literal
                    (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                    if exc is not None:
                        raise TypeError('unable to evaluate string as dictionary')
                    return result
            elif '=' in value:
                # hand-rolled tokenizer: split on spaces/commas outside quotes
                fields = []
                field_buffer = []
                in_quote = False
                in_escape = False
                for c in value.strip():
                    if in_escape:
                        # previous char was a backslash: take this one literally
                        field_buffer.append(c)
                        in_escape = False
                    elif c == '\\':
                        in_escape = True
                    elif not in_quote and c in ('\'', '"'):
                        # remember which quote char opened, to match its close
                        in_quote = c
                    elif in_quote and in_quote == c:
                        in_quote = False
                    elif not in_quote and c in (',', ' '):
                        field = ''.join(field_buffer)
                        if field:
                            fields.append(field)
                        field_buffer = []
                    else:
                        field_buffer.append(c)
                # flush the trailing token
                field = ''.join(field_buffer)
                if field:
                    fields.append(field)
                return dict(x.split("=", 1) for x in fields)
            else:
                raise TypeError("dictionary requested, could not parse JSON or key=value")
        raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
    def _check_type_raw(self, value):
        # 'raw' type: pass the value through completely untouched.
        return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
    def _check_argument_types(self, spec=None, param=None):
        ''' ensure all arguments have the requested type '''
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params
        for (k, v) in spec.items():
            wanted = v.get('type', None)
            if k not in param:
                continue
            if wanted is None:
                # Mostly we want to default to str.
                # For values set to None explicitly, return None instead as
                # that allows a user to unset a parameter
                if param[k] is None:
                    continue
                wanted = 'str'
            value = param[k]
            if value is None:
                continue
            try:
                type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
            except KeyError:
                self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
            try:
                # coerce in place; the checker raises on unconvertible values
                param[k] = type_checker(value)
            except (TypeError, ValueError):
                e = get_exception()
                self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e))
            # deal with sub options to create sub spec
            spec = None
            if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
                spec = v.get('options', None)
            if spec:
                # recurse into the sub-spec with the coerced value as params
                self._check_required_arguments(spec, param[k])
                self._check_argument_types(spec, param[k])
                self._check_argument_values(spec, param[k])
def _set_defaults(self, pre=True):
for (k, v) in self.argument_spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
for (k, v) in self.argument_spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in self.params and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None or isinstance(arg, bool):
return arg
if isinstance(arg, string_types):
arg = arg.lower()
if arg in BOOLEANS_TRUE:
return True
elif arg in BOOLEANS_FALSE:
return False
else:
self.fail_json(msg='%s is not a valid boolean. Valid booleans include: %s' % (to_text(arg), ','.join(['%s' % x for x in BOOLEANS])))
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, cls=_SetEncoder)
# Old systems using old simplejson module does not support encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, cls=_SetEncoder)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
if 'changed' not in kwargs:
kwargs['changed'] = False
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
if 'changed' not in kwargs:
kwargs['changed'] = False
# add traceback if debug or high verbosity and it is missing
# Note: badly named as exception, it is really always been 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
shutil.copy2(fn, backupdest)
except (shutil.Error, IOError):
e = get_exception()
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError:
e = get_exception()
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest, returns true on success
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError:
e = get_exception()
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError):
e = get_exception()
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
else:
b_dest_dir = os.path.dirname(b_dest)
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
native_dest_dir = b_dest_dir
native_suffix = os.path.basename(b_dest)
native_prefix = b('.ansible_tmp')
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment vairable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
if isinstance(args, list):
if use_unsafe_shell:
args = " ".join([shlex_quote(x) for x in args])
shell = True
elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
shell = True
elif isinstance(args, (binary_type, text_type)):
if not use_unsafe_shell:
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
else:
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
if executable is None:
executable = os.environ.get('SHELL')
if executable:
args = [executable, '-c', args]
else:
shell = True
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
# expand things like $HOME and ~
if not shell:
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in to_clean_args:
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + clean_args)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError):
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception:
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| jonathonwalz/ansible | lib/ansible/module_utils/basic.py | Python | gpl-3.0 | 104,709 | [
"VisIt"
] | 1d58b4f33a49cd69bb5b11948628bb2d719869f7f6897b44c5a4ab3b16bf35e2 |
#!/usr/bin/env python
################################################################################
# hapClassifier.py is script to classify sickle cell haplotypes
# Copyright (c) [2017] [Elmutaz Shaikho Elhaj Mohammed]
################################################################################
#!/usr/local/bin/
import vcf
import subprocess
import sys
import pysam
import os
## This script takes the input file as first argument and output file as second argument
## python hapClassifier.py input(bgzipped and tabix indexed file) output file(text file name)
## Check input and out put files
if len(sys.argv) < 3:
print "Error: one or more arguement is/are missing"
print "python hapClassifier.py input(bgzipped and tabix indexed file) output file(text file name)"
exit(1)
else:
## Define input and output files
inputFile = sys.argv[1]
outputFile = sys.argv[2]
print 'Input file:'+ sys.argv[1]
print 'Output file:'+ sys.argv[2]
## Open output file for writing Haps Classes
target = open(outputFile, 'w')
### read vcf.gz
vcf_reader = vcf.Reader(filename=inputFile)
## Get all samples
samples = vcf_reader.samples
## Define a list to fetch SNPs by position. coordinates in snpList are zero-based
## Coordinates Should be according to Human Genome Reference b37
snpList = [5291562, 5269798, 5263682, 5260457]
## get 4 SNPs records needed for classification
records=[record for i in snpList for record in vcf_reader.fetch('11', i,(i+1))]
## Check in records is empty
if not records:
print "Error: Data may does not have the SNPs needed for classification,\n \
or SNPs locations and/or RSID do not match Humanan genome Reference b37 dbSNP build 141"
exit(1)
## Check if the data is phased or not, if not EXIT and print error messege
call = record.genotype(samples[0])
if not call.phased:
print "Error: Data is not phased"
exit(1)
## Defining halpotype Dictionaries using RSID or Chromosome:Position as SNP ID
if "rs" in records[0].ID:
AI_HET = {'rs3834466' : 'GT','rs28440105': 'C', 'rs10128556' : 'T' , 'rs968857' : 'T'}
SEN_HET = {'rs3834466' : 'G','rs28440105' : 'C', 'rs10128556' : 'T' , 'rs968857' : 'T'}
BEN_HET = {'rs3834466' : 'G','rs28440105' : 'C', 'rs10128556' : 'C' , 'rs968857' : 'T'}
CAR_HET = {'rs3834466' : 'G','rs28440105' : 'C', 'rs10128556' : 'C' , 'rs968857' : 'C'}
CAM_HET = {'rs3834466' : 'G','rs28440105' : 'A', 'rs10128556' : 'C' , 'rs968857' : 'T'}
print 'success'
else:
AI_HET = {'11:5291563' : 'GT','11:5269799': 'C', '11:5263683' : 'T' , '11:5260458' : 'T'}
SEN_HET = {'11:5291563' : 'G','11:5269799' : 'C', '11:5263683' : 'T' , '11:5260458' : 'T'}
BEN_HET = {'11:5291563' : 'G','11:5269799' : 'C', '11:5263683' : 'C' , '11:5260458' : 'T'}
CAR_HET = {'11:5291563' : 'G','11:5269799' : 'C', '11:5263683' : 'C' , '11:5260458' : 'C'}
CAM_HET = {'11:5291563' : 'G','11:5269799' : 'A', '11:5263683' : 'C' , '11:5260458' : 'T'}
## Form hap classes dictionaries from the data
for smpl in samples:
keys = keys =[records[i].ID for i in range(len(records))]
values_M = [(records[i].genotype(smpl).gt_bases).split('|')[0] for i in range(len(records))]
values_F = [(records[i].genotype(smpl).gt_bases).split('|')[1] for i in range(len(records))]
Hapclass_M = dict(zip(keys, values_M))
Hapclass_F = dict(zip(keys, values_F))
classF=""
classM=""
if Hapclass_M == Hapclass_F:
if Hapclass_M == Hapclass_F == BEN_HET:
target.write( '\t'.join( [smpl, 'BEN HOMO'] ) + '\n' )
elif Hapclass_M == Hapclass_F == SEN_HET:
target.write( '\t'.join( [smpl, 'SEN HOMO'] ) + '\n' )
elif Hapclass_M == Hapclass_F == AI_HET:
target.write( '\t'.join( [smpl, 'AI HOMO'] ) + '\n' )
elif Hapclass_M == Hapclass_F == CAR_HET:
target.write( '\t'.join( [smpl, 'CAR HOMO'] ) + '\n' )
elif Hapclass_M == Hapclass_F == CAM_HET:
target.write( '\t'.join( [smpl, 'CAM HOMO'] ) + '\n' )
else:
target.write( '\t'.join( [smpl, 'UNK HOMO'] ) + '\n' )
###
if Hapclass_F and (Hapclass_M != Hapclass_F) :
if Hapclass_F == BEN_HET:
classF = 'BEN'
elif Hapclass_F == SEN_HET:
classF = 'SEN'
elif Hapclass_F == AI_HET:
classF = 'AI'
elif Hapclass_F == CAR_HET:
classF = 'CAR'
elif Hapclass_F == CAM_HET:
classF = 'CAM'
else:
classF = 'UNK'
###
if Hapclass_M and (Hapclass_M != Hapclass_F) :
if Hapclass_M == BEN_HET:
classM = 'BEN'
elif Hapclass_M == SEN_HET:
classM = 'SEN'
elif Hapclass_M == AI_HET:
classM = 'AI'
elif Hapclass_M == CAR_HET:
classM = 'CAR'
elif Hapclass_M == CAM_HET:
classM = 'CAM'
else:
classM = 'UNK'
if Hapclass_M != Hapclass_F:
target.write( smpl +'\t' + classF + '/' + classM + '\n' )
target.close()
## Change hetro classification e.g. BEN/CAM and CAM/BEN to be only one of them BEN/CAM
sub = subprocess.call(['sed', '-i.bak', r"s/BEN\/SEN/SEN\/BEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/BEN\/AI/AI\/BEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/BEN\/CAR/CAR\/BEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/BEN\/CAM/CAM\/BEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/AI\/SEN/SEN\/AI/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/SEN\/CAR/CAR\/SEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/SEN\/CAM/CAM\/SEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/CAR\/AI/AI\/CAR/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/CAM\/AI/AI\/CAM/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/CAM\/CAR/CAR\/CAM/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/BEN\/UNK/UNK\/BEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/SEN\/UNK/UNK\/SEN/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/CAM\/UNK/UNK\/CAM/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/CAR\/UNK/UNK\/CAR/g", outputFile])
sub = subprocess.call(['sed', '-i.bak', r"s/AI\/UNK/UNK\/AI/g", outputFile])
## Remove backup file
os.remove(outputFile+r".bak")
| eshaikho/haplotypeClassifier | hapClassifier.py | Python | mit | 6,003 | [
"pysam"
] | 42dc6da4d695706e89ffe2be63cfb1d400d4e8ac503f9c5c3f12b203c95f64f9 |
#
#@BEGIN LICENSE
#
# cctransort by Psi4 Developer, a plugin to:
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""Plugin docstring.
"""
__version__ = '0.1'
__author__ = 'Psi4 Developer'
# Load Python modules
from pymodule import *
# Load C++ plugin
import os
import psi4
# Locate this package's compiled plugin: the shared object is named after
# the plugin directory (e.g. <plugdir>/cctransort.so) and lives next to
# this __init__.py.  Use os.path helpers instead of manual '/' joining so
# the path is built portably.
plugdir = os.path.dirname(os.path.abspath(__file__))
sofile = os.path.join(plugdir, os.path.basename(plugdir) + '.so')
psi4.plugin_load(sofile)
| lothian/cctransort | __init__.py | Python | gpl-2.0 | 1,151 | [
"Psi4"
] | 8c1b73889f2af4f030a41176318b9ab8fa285c0940b9475fc9b9c8b608360a61 |
# Copyright (C) 2012,2013,2016,2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Calculates the Cosine potential as:
.. math::
U = K (1 + cos(\\theta - \\theta_0))
.. function:: espressopp.interaction.Cosine(K, theta0)
:param real K: (default: 1.0)
:param real theta0: (default: 0.0)
.. function:: espressopp.interaction.FixedTripleListCosine(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedTripleListCosine.getFixedTripleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedTripleListCosine.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Cosine, interaction_FixedTripleListCosine
class CosineLocal(AngularPotentialLocal, interaction_Cosine):
    """Worker-side wrapper around the C++ ``interaction_Cosine`` potential."""

    def __init__(self, K=1.0, theta0=0.0):
        # Construct the C++ object when PMI is not active (serial run) or
        # when this MPI rank is part of the active PMI CPU group.
        pmi_is_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_is_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_Cosine, K, theta0)
class FixedTripleListCosineLocal(InteractionLocal, interaction_FixedTripleListCosine):
    """Worker-side interaction binding a Cosine potential to a fixed triple list."""

    def __init__(self, system, vl, potential):
        # Only build the C++ interaction on ranks that participate in PMI.
        pmi_is_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_is_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedTripleListCosine, system, vl, potential)

    def setPotential(self, potential):
        """Replace the potential used by this interaction (active ranks only)."""
        pmi_is_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_is_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def getFixedTripleList(self):
        """Return the underlying fixed triple list (active ranks only)."""
        pmi_is_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_is_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
    # Controller-side proxy classes: attribute/method access is forwarded by
    # the PMI layer to the *Local counterparts running on the workers.
    class Cosine(AngularPotential):
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CosineLocal',
            pmiproperty = ['K', 'theta0']
        )
    class FixedTripleListCosine(Interaction, metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedTripleListCosineLocal',
            pmicall = ['setPotential','getFixedTripleList']
        )
| espressopp/espressopp | src/interaction/Cosine.py | Python | gpl-3.0 | 3,359 | [
"ESPResSo"
] | 7191bad3f202be6a30fa898cf9f6728ba3ff690ef7367961fb8420677fec8f18 |
import unittest
import configparser
import os
import tempfile
import socket
from IPy import IP
from kagent_utils import KConfig
class TestKConfig(unittest.TestCase):
# server section
url = 'http://localhost:1337/'
path_login = 'login/path'
path_register = 'register/path'
path_ca_host = 'ca/host/path'
path_heartbeat = 'heartbeat/path'
username = 'username'
server_password = 'server_password'
# agent section
host_id = 'host_0'
restport = '8080'
heartbeat_interval = '3'
services_file = 'path/to/services/file'
watch_interval = '4s'
bin_dir = 'path/to/bin/dir'
sbin_dir = 'path/to/sbin/dir'
pid_file = 'path/to/pid/file'
agent_log_dir = 'path/to/agent/logs'
csr_log_file = 'path/to/csr/log_file'
logging_level = 'DEBUG'
max_log_size = '100'
hostname = 'myhostname'
group_name = 'group'
hadoop_home = 'path/to/hadoop_home'
certs_dir = 'path/to/certs_dir'
certs_user = 'cert_user'
certificate_file = 'path/to/certificate'
key_file = 'path/to/key'
hops_ca_cert_file = 'path/to/file'
server_keystore = 'path/to/server_keystore'
server_truststore = 'path/to/server_truststore'
keystore_script = 'path/to/keystore_script'
state_store = 'path/to/state_store'
agent_password = 'agent_password'
conda_dir = 'path/to/conda'
conda_user = 'conda_user'
conda_envs_blacklist = 'python27,python35,hops-system'
conda_gc_interval = '2h'
private_ip = '127.0.0.1'
public_ip = '192.168.0.1'
elk_key_file = 'path/to/certs_dir/elastic_admin.key'
elk_certificate_file = 'path/to/certs_dir/elastic_admin.pem'
elk_cn = 'ELkAdmin'
elastic_host_certificate = 'path/to/certs_dir/elastic_host.pem'
def setUp(self):
self.config_file = tempfile.mkstemp(prefix='kagent_config_')
def tearDown(self):
os.remove(self.config_file[1])
def test_parse_full_config(self):
self._prepare_config_file(True)
config = KConfig(self.config_file[1])
config.load()
config.read_conf()
self.assertEqual(self.url, config.server_url)
self.assertEqual(self._toUrl(self.path_login), config.login_url)
self.assertEqual(self._toUrl(self.path_register), config.register_url)
self.assertEqual(self._toUrl(self.path_ca_host), config.ca_host_url)
self.assertEqual(self._toUrl(self.path_heartbeat), config.heartbeat_url)
self.assertEqual(self.username, config.server_username)
self.assertEqual(self.server_password, config.server_password)
self.assertEqual(self.host_id, config.host_id)
self.assertEqual(int(self.restport), config.rest_port)
self.assertEqual(int(self.heartbeat_interval), config.heartbeat_interval)
self.assertEqual(self.services_file, config.services_file)
self.assertEqual(self.watch_interval, config.watch_interval)
self.assertEqual(self.bin_dir, config.bin_dir)
self.assertEqual(self.sbin_dir, config.sbin_dir)
self.assertEqual(self.pid_file, config.agent_pidfile)
self.assertEqual(self.agent_log_dir, config.agent_log_dir)
self.assertEqual(self.csr_log_file, config.csr_log_file)
self.assertEqual(self.logging_level, config.logging_level_str)
self.assertEqual(int(self.max_log_size), config.max_log_size)
self.assertEqual(self.private_ip, config.private_ip)
self.assertEqual(self.public_ip, config.public_ip)
self.assertEqual(self.hostname, config.hostname)
self.assertEqual(self.group_name, config.group_name)
self.assertEqual(self.hadoop_home, config.hadoop_home)
self.assertEqual(self.certs_dir, config.certs_dir)
self.assertEqual(self.certs_user, config.certs_user)
self.assertEqual(self.certificate_file, config.certificate_file)
self.assertEqual(self.key_file, config.key_file)
self.assertEqual(self.server_keystore, config.server_keystore)
self.assertEqual(self.server_truststore, config.server_truststore)
self.assertEqual(self.keystore_script, config.keystore_script)
self.assertEqual(self.state_store, config.state_store_location)
self.assertEqual(self.agent_password, config.agent_password)
self.assertEqual(self.conda_dir, config.conda_dir)
self.assertEqual(self.conda_user, config.conda_user)
self.assertEqual(self.conda_envs_blacklist,
config.conda_envs_blacklist)
self.assertEqual(self.conda_gc_interval, config.conda_gc_interval)
self.assertEqual(self.elk_key_file, config.elk_key_file)
self.assertEqual(self.elk_certificate_file, config.elk_certificate_file)
self.assertEqual(self.elk_cn, config.elk_cn)
self.assertEqual(self.elastic_host_certificate, config.elastic_host_certificate)
self.assertEqual(self.hops_ca_cert_file, config.hops_ca_cert_file)
# Let KConfig figure out values for these properties
def test_parse_partial_config(self):
self._prepare_config_file(False)
config = KConfig(self.config_file[1])
config.load()
config.read_conf()
self.assertIsNotNone(config.agent_password)
self.assertNotEqual('', config.agent_password)
my_hostname = socket.gethostbyaddr(self.private_ip)[0]
self.assertEqual(my_hostname, config.hostname)
self.assertEqual(my_hostname, config.host_id)
def test_alternate_host(self):
alternate_host = "https://alternate.url:443/"
self._prepare_config_file(False)
config = KConfig(self.config_file[1])
config.load()
config.server_url = alternate_host
config.read_conf()
self.assertEqual(alternate_host, config.server_url)
register_path = config._config.get('server', 'path-register')
self.assertEqual(alternate_host + register_path, config.register_url)
def _prepare_config_file(self, all_keys):
config = configparser.ConfigParser()
config['server'] = {
'url': self.url,
'path-login': self.path_login,
'path-register': self.path_register,
'path-ca-host': self.path_ca_host,
'path-heartbeat': self.path_heartbeat,
'username': self.username,
'password': self.server_password
}
if all_keys:
config['agent'] = {
'host-id': self.host_id,
'restport': self.restport,
'heartbeat-interval': self.heartbeat_interval,
'services-file': self.services_file,
'watch-interval': self.watch_interval,
'bin-dir': self.bin_dir,
'sbin-dir': self.sbin_dir,
'pid-file': self.pid_file,
'agent-log-dir': self.agent_log_dir,
'csr-log-file': self.csr_log_file,
'logging-level': self.logging_level,
'max-log-size': self.max_log_size,
'hostname': self.hostname,
'group-name': self.group_name,
'hadoop-home': self.hadoop_home,
'certs-dir': self.certs_dir,
'certs-user': self.certs_user,
'certificate-file': self.certificate_file,
'key-file': self.key_file,
'server-keystore': self.server_keystore,
'server-truststore': self.server_truststore,
'keystore-script': self.keystore_script,
'state-store': self.state_store,
'password': self.agent_password,
'conda-dir': self.conda_dir,
'conda-user': self.conda_user,
'conda-envs-blacklist': self.conda_envs_blacklist,
'conda-gc-interval': self.conda_gc_interval,
'private-ip': self.private_ip,
'public-ip': self.public_ip,
'elk-key-file' : self.elk_key_file,
'elk-certificate-file' : self.elk_certificate_file,
'elk-cn' : self.elk_cn,
'elastic-host-certificate': self.elastic_host_certificate,
'hops_ca-cert-file': self.hops_ca_cert_file
}
else:
config['agent'] = {
'restport': self.restport,
'heartbeat-interval': self.heartbeat_interval,
'services-file': self.services_file,
'watch-interval': self.watch_interval,
'bin-dir': self.bin_dir,
'sbin-dir': self.sbin_dir,
'pid-file': self.pid_file,
'agent-log-dir': self.agent_log_dir,
'csr-log-file': self.csr_log_file,
'logging-level': self.logging_level,
'max-log-size': self.max_log_size,
'group-name': self.group_name,
'hadoop-home': self.hadoop_home,
'certs-dir': self.certs_dir,
'certs-user': self.certs_user,
'certificate-file': self.certificate_file,
'key-file': self.key_file,
'server-keystore': self.server_keystore,
'server-truststore': self.server_truststore,
'keystore-script': self.keystore_script,
'state-store': self.state_store,
'password': self.agent_password,
'conda-dir': self.conda_dir,
'conda-user': self.conda_user,
'conda-envs-blacklist': self.conda_envs_blacklist,
'conda-gc-interval': self.conda_gc_interval,
'private-ip': self.private_ip,
'public-ip': self.public_ip,
'elk-key-file' : self.elk_key_file,
'elk-certificate-file' : self.elk_certificate_file,
'elk-cn' : self.elk_cn,
'elastic-host-certificate': self.elastic_host_certificate,
'hops_ca-cert-file': self.hops_ca_cert_file
}
with open(self.config_file[1], 'w') as config_fd:
config.write(config_fd)
def _toUrl(self, path):
return self.url + path
# Allow running this test module directly: python test_kagent_config.py
if __name__ == "__main__":
    unittest.main()
| karamelchef/kagent-chef | files/default/kagent_utils/tests/test_kagent_config.py | Python | gpl-3.0 | 10,234 | [
"Elk"
] | 9d0e019fe698cfd74770757ce4d1e115e4f3be6d47af942e452399d02e153c66 |
# -*- coding: utf-8 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
.. module:: astm.asynclib
:synopsis: Forked version of asyncore mixed with asynchat.
.. moduleauthor:: Sam Rushing <rushing@nightmare.com>
.. sectionauthor:: Christopher Petrilli <petrilli@amber.org>
.. sectionauthor:: Steve Holden <sholden@holdenweb.com>
.. heavily adapted from original documentation by Sam Rushing
"""
import heapq
import logging
import os
import select
import socket
import sys
import time
from collections import deque
from errno import (
EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL,
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN,
errorcode
)
from .compat import long, b, bytes, buffer
class ExitNow(Exception):
    """Raised by a handler to abort the polling loop immediately; it is
    always re-raised (part of ``_RERAISEABLE_EXC``) rather than being
    routed to ``handle_error()``."""
    pass
# errno values that signal the remote end has gone away; channels seeing
# one of these are closed instead of reporting an error.
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF))
# Exceptions that must always propagate out of the event loop untouched.
_RERAISEABLE_EXC = (ExitNow, KeyboardInterrupt, SystemExit)
# Global registry of open channels, keyed by file descriptor.
_SOCKET_MAP = {}
# Global heap of pending call_later tasks, ordered by timeout.
_SCHEDULED_TASKS = []
log = logging.getLogger(__name__)
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" % err
def _dispatch(obj, handler_name):
    """Invoke ``obj.<handler_name>()``, re-raising loop-control exceptions
    (``_RERAISEABLE_EXC``) and funnelling any other failure into
    ``obj.handle_error()``.  Shared by read()/write()/exception() so the
    error-handling policy cannot drift between them."""
    try:
        getattr(obj, handler_name)()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()


def read(obj):
    """Triggers ``handle_read_event`` for specified object."""
    _dispatch(obj, 'handle_read_event')


def write(obj):
    """Triggers ``handle_write_event`` for specified object."""
    _dispatch(obj, 'handle_write_event')


def exception(obj):
    """Triggers ``handle_exception_event`` for specified object."""
    _dispatch(obj, 'handle_exception_event')
def readwrite(obj, flags):
    """Dispatch poll()-style *flags* to *obj*'s event handlers.

    Translates select.POLL* bits into the corresponding handle_* calls.
    Disconnect errnos close the channel quietly; anything else is routed
    to handle_error().
    """
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_exception_event()
        # Hang-up / error / invalid-fd conditions all mean the channel is gone.
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error as e:
        if e.args[0] not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """Run one select() pass over the channels in *map* (default: the
    global socket map) and dispatch read/write/exception events to the
    ready channels.

    With an empty *map* this is a no-op; when no channel is interested in
    any event the call simply sleeps for *timeout* seconds.
    """
    if map is None:
        map = map or _SOCKET_MAP
    if map:
        r = []; w = []; e = []
        # Build the fd lists from each channel's declared interest.
        for fd, obj in map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        # Nothing to watch: emulate the select() timeout, then bail out.
        if [] == r == w == e:
            time.sleep(timeout)
            return
        try:
            r, w, e = select.select(r, w, e, timeout)
        except select.error as err:
            # EINTR (signal during select) is benign; retry on next pass.
            if err.args[0] != EINTR:
                raise
            else:
                return
        # A handler may have removed a channel from the map mid-pass,
        # hence the .get() / None checks below.
        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)
        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)
        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            exception(obj)
def scheduler(tasks=None):
    """Run every task in *tasks* (default: the global heap) whose timeout
    has expired.

    A task flagged ``repush`` (rescheduled via reset()/delay()) is pushed
    back onto the heap instead of running.  Each executed task is
    cancelled afterwards unless it already cancelled itself.
    """
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    now = time.time()
    while tasks and now >= tasks[0].timeout:
        call = heapq.heappop(tasks)
        if call.repush:
            # The task was rescheduled after being pushed: re-insert it at
            # its new timeout and clear the flag.
            heapq.heappush(tasks, call)
            call.repush = False
            continue
        try:
            call.call()
        finally:
            if not call.cancelled:
                call.cancel()
def loop(timeout=30.0, map=None, tasks=None, count=None):
    """
    Enter a polling loop that terminates after *count* passes or once all
    open channels have been closed and all scheduled tasks are gone.

    :param timeout: timeout in seconds handed to each :func:`select` call
        inside :func:`poll` (default 30 seconds).
    :param map: dictionary of channels to watch; defaults to the global
        socket map.  Channels are deleted from their map as they close.
    :param count: number of polling passes; ``None`` (the default) loops
        until both *map* and *tasks* are empty.
    :param tasks: list of scheduled :class:`call_later` instances; defaults
        to the global task list.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    if count is None:
        # Run until there is nothing left to watch or to schedule.
        while map or tasks:
            if map:
                poll(timeout, map)
            if tasks:
                scheduler()
    else:
        while (map or tasks) and count > 0:
            if map:
                poll(timeout, map)
            if tasks:
                scheduler()
            count -= 1
class call_later:
    """Calls a function at a later time.
    It can be used to asynchronously schedule a call within the polling
    loop without blocking it. The instance returned is an object that
    can be used to cancel or reschedule the call.
    """

    def __init__(self, seconds, target, *args, **kwargs):
        """
        - seconds: the number of seconds to wait
        - target: the callable object to call later
        - args: the arguments to call it with
        - kwargs: the keyword arguments to call it with
        - _tasks: a reserved keyword to specify a different list to
          store the delayed call instances.
        """
        assert callable(target), "%s is not callable" % target
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        self.__target = target
        self.__args = args
        self.__kwargs = kwargs
        # NOTE(review): '_tasks' is popped *after* kwargs was captured above,
        # so it would still be forwarded to the target -- confirm intended.
        self.__tasks = kwargs.pop('_tasks', _SCHEDULED_TASKS)
        # seconds from the epoch at which to call the function
        self.timeout = time.time() + self.__delay
        self.repush = False
        self.cancelled = False
        heapq.heappush(self.__tasks, self)

    def __lt__(self, other):
        # Heap ordering by expiry time (note: uses <=, not strict <).
        return self.timeout <= other.timeout

    def call(self):
        """Call this scheduled function."""
        assert not self.cancelled, "Already cancelled"
        self.__target(*self.__args, **self.__kwargs)

    def reset(self):
        """Reschedule this call resetting the current countdown."""
        assert not self.cancelled, "Already cancelled"
        self.timeout = time.time() + self.__delay
        self.repush = True

    def delay(self, seconds):
        """Reschedule this call for a later time."""
        assert not self.cancelled, "Already cancelled."
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        newtime = time.time() + self.__delay
        if newtime > self.timeout:
            # Moving later: defer the re-heapify to scheduler() via repush.
            self.timeout = newtime
            self.repush = True
        else:
            # XXX - slow, can be improved
            self.timeout = newtime
            heapq.heapify(self.__tasks)

    def cancel(self):
        """Unschedule this call."""
        assert not self.cancelled, "Already cancelled"
        self.cancelled = True
        del self.__target, self.__args, self.__kwargs
        if self in self.__tasks:
            pos = self.__tasks.index(self)
            if pos == 0:
                heapq.heappop(self.__tasks)
            elif pos == len(self.__tasks) - 1:
                self.__tasks.pop(pos)
            else:
                # Replace the middle element with the last one, then restore
                # the heap invariant.  heapq._siftup is a private CPython
                # helper -- NOTE(review): may break on other interpreters.
                self.__tasks[pos] = self.__tasks.pop()
                heapq._siftup(self.__tasks, pos)
class Dispatcher(object):
    """
    The :class:`Dispatcher` class is a thin wrapper around a low-level socket
    object. To make it more useful, it has a few methods for event-handling
    which are called from the asynchronous loop. Otherwise, it can be treated
    as a normal non-blocking socket object.
    The firing of low-level events at certain times or in certain connection
    states tells the asynchronous loop that certain higher-level events have
    taken place. For example, if we have asked for a socket to connect to
    another host, we know that the connection has been made when the socket
    becomes writable for the first time (at this point you know that you may
    write to it with the expectation of success). The implied higher-level
    events are:
    +----------------------+----------------------------------------+
    | Event                | Description                            |
    +======================+========================================+
    | ``handle_connect()`` | Implied by the first read or write     |
    |                      | event                                  |
    +----------------------+----------------------------------------+
    | ``handle_close()``   | Implied by a read event with no data   |
    |                      | available                              |
    +----------------------+----------------------------------------+
    | ``handle_accept()``  | Implied by a read event on a listening |
    |                      | socket                                 |
    +----------------------+----------------------------------------+
    During asynchronous processing, each mapped channel's :meth:`readable` and
    :meth:`writable` methods are used to determine whether the channel's socket
    should be added to the list of channels :c:func:`select`\ ed or
    :c:func:`poll`\ ed for read and write events.
    """

    # Class-level defaults; instances overwrite them as state changes.
    connected = False
    accepting = False
    addr = None

    def __init__(self, sock=None, map=None):
        # Register in the given socket map (or the module-global one).
        if map is None:
            self._map = _SOCKET_MAP
        else:
            self._map = map
        self._fileno = None
        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error as err:
                if err.args[0] == ENOTCONN:
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self._del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        """Return a compact status string: state and peer address."""
        status = [self.__class__.__module__ + '.' + self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) tuple (e.g. a UNIX path).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    __str__ = __repr__

    def _add_channel(self, map=None):
        """Register this channel in *map* under its file descriptor."""
        log.debug('Adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def _del_channel(self, map=None):
        """Remove this channel from *map* and forget its file descriptor."""
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            log.debug('Closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family, type):
        """
        This is identical to the creation of a normal socket, and will use
        the same options for creation. Refer to the :mod:`socket` documentation
        for information on creating sockets.
        """
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Adopt *sock* as this channel's socket and register it in *map*."""
        self.socket = sock
        self._fileno = sock.fileno()
        self._add_channel(map)

    def set_reuse_addr(self):
        """Set SO_REUSEADDR so a server port can be re-bound quickly;
        failures are deliberately ignored (best effort)."""
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
            )
        except socket.error:
            pass

    def readable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which read events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in read events."""
        return True

    def writable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which write events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in write events.
        """
        return True

    def listen(self, num):
        """Listen for connections made to the socket.
        The `num` argument specifies the maximum number of queued connections
        and should be at least 1; the maximum value is system-dependent
        (usually 5)."""
        self.accepting = True
        # Old Windows limitation: cap the backlog at 5.
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, address):
        """Bind the socket to `address`.
        The socket must not already be bound. The format of `address` depends
        on the address family --- refer to the :mod:`socket` documentation for
        more information. To mark the socket as re-usable (setting the
        :const:`SO_REUSEADDR` option), call the :class:`Dispatcher` object's
        :meth:`set_reuse_addr` method.
        """
        self.addr = address
        return self.socket.bind(address)

    def connect(self, address):
        """
        As with the normal socket object, `address` is a tuple with the first
        element the host to connect to, and the second the port number.
        """
        self.connected = False
        self.addr = address
        err = self.socket.connect_ex(address)
        # Connection in progress on a non-blocking socket: wait for the
        # write event (EINVAL is the Windows/CE equivalent).
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK)\
        or err == EINVAL and os.name in ('nt', 'ce'):
            return
        if err in (0, EISCONN):
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        """Accept a connection.
        The socket must be bound to an address and listening for connections.
        The return value can be either ``None`` or a pair ``(conn, address)``
        where `conn` is a *new* socket object usable to send and receive data on
        the connection, and *address* is the address bound to the socket on the
        other end of the connection.
        When ``None`` is returned it means the connection didn't take place, in
        which case the server should just ignore this event and keep listening
        for further incoming connections.
        """
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except socket.error as err:
            if err.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        """Send `data` to the remote end-point of the socket."""
        try:
            log.debug('[%s:%d] <<< %r', self.addr[0], self.addr[1], data)
            result = self.socket.send(data)
            return result
        except socket.error as err:
            # EWOULDBLOCK: nothing sent this time, caller retries later.
            if err.args[0] == EWOULDBLOCK:
                return 0
            elif err.args[0] in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Read at most `buffer_size` bytes from the socket's remote end-point.
        An empty string implies that the channel has been closed from the other
        end.
        """
        try:
            data = self.socket.recv(buffer_size)
            log.debug('[%s:%d] >>> %r', self.addr[0], self.addr[1], data)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except socket.error as err:
            # winsock sometimes throws ENOTCONN
            if err.args[0] in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        """Close the socket.
        All future operations on the socket object will fail.
        The remote end-point will receive no more data (after queued data is
        flushed). Sockets are automatically closed when they are
        garbage-collected.
        """
        self.connected = False
        self.accepting = False
        self._del_channel()
        try:
            self.socket.close()
        except socket.error as err:
            if err.args[0] not in (ENOTCONN, EBADF):
                raise

    def handle_read_event(self):
        """Route a low-level read event to accept/connect/read handlers."""
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            # First read on an outgoing socket implies the connect finished.
            self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        """Confirm the pending connect succeeded and fire handle_connect()."""
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err, _strerror(err))
        self.handle_connect()
        self.connected = True

    def handle_write_event(self):
        """Route a low-level write event; completes a pending connect first."""
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return
        if not self.connected:
            #check for errors
            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                raise socket.error(err, _strerror(err))
            self.handle_connect_event()
        self.handle_write()

    def handle_exception_event(self):
        # handle_exception_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_exception()

    def handle_error(self):
        """
        Called when an exception is raised and not otherwise handled.
        The default version prints a condensed traceback.
        """
        try:
            self_repr = repr(self)
        except Exception:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
        log.exception('Uncatched python exception, closing channel %s',
                      self_repr)
        self.handle_close()

    def handle_exception(self):
        """Default OOB/exceptional-condition handler: just log it."""
        log.exception('Unknown error')

    def handle_read(self):
        """Default read handler; subclasses are expected to override."""
        log.debug('Unhandled read event')

    def handle_write(self):
        """
        Called when the asynchronous loop detects that a writable socket can be
        written. Often this method will implement the necessary buffering for
        performance. For example::
            def handle_write(self):
                sent = self.send(self.buffer)
                self.buffer = self.buffer[sent:]
        """
        log.debug('Unhandled write event')

    def handle_connect(self):
        """
        Called when the active opener's socket actually makes a connection.
        Might send a "welcome" banner, or initiate a protocol negotiation with
        the remote endpoint, for example.
        """
        log.info('[%s:%d] Connection established', self.addr[0], self.addr[1])

    def handle_accept(self):
        """
        Called on listening channels (passive openers) when a connection can be
        established with a new remote endpoint that has issued a :meth:`connect`
        call for the local endpoint.
        """
        log.info('[%s:%d] Connection accepted', self.addr[0], self.addr[1])

    def handle_close(self):
        """Called when the socket is closed."""
        log.info('[%s:%d] Connection closed', self.addr[0], self.addr[1])
        self.close()
def close_all(map=None, tasks=None, ignore_all=False):
    """Close every channel in *map* and cancel every task in *tasks*.

    Defaults to the module-global socket map and task list.  With
    *ignore_all* set, errors from individual close()/cancel() calls are
    swallowed (loop-control exceptions always propagate); EBADF from a
    close is always ignored.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as err:
            # A stale descriptor is harmless; anything else honours ignore_all.
            if err.args[0] != EBADF and not ignore_all:
                raise
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    map.clear()
    for task in tasks:
        try:
            task.cancel()
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    del tasks[:]
class AsyncChat(Dispatcher):
"""
This class is an abstract subclass of :class:`Dispatcher`. To make
practical use of the code you must subclass :class:`AsyncChat`, providing
meaningful meth:`found_terminator` method.
The :class:`Dispatcher` methods can be used, although not all make
sense in a message/response context.
Like :class:`Dispatcher`, :class:`AsyncChat` defines a set of
events that are generated by an analysis of socket conditions after a
:c:func:`select` call. Once the polling loop has been started the
:class:`AsyncChat` object's methods are called by the event-processing
framework with no action on the part of the programmer.
"""
# these are overridable defaults
#: The asynchronous input buffer size.
recv_buffer_size = 4096
#: The asynchronous output buffer size.
send_buffer_size = 4096
#: Encoding usage is not enabled by default, because that is a
#: sign of an application bug that we don't want to pass silently.
use_encoding = False
#: Default encoding.
encoding = 'latin-1'
#: Remove terminator from the result data.
strip_terminator = True
_terminator = None
def __init__(self, sock=None, map=None):
# for string terminator matching
self._input_buffer = b''
self.inbox = deque()
self.outbox = deque()
super(AsyncChat, self).__init__(sock, map)
self.collect_incoming_data = self.pull
self.initiate_send = self.flush
def pull(self, data):
"""Puts `data` into incoming queue. Also available by alias
`collect_incoming_data`.
"""
self.inbox.append(data)
def found_terminator(self):
"""
Called when the incoming data stream matches the :attr:`termination`
condition. The default method, which must be overridden, raises a
:exc:`NotImplementedError` exception. The buffered input data should be
available via an instance attribute.
"""
raise NotImplementedError("must be implemented in subclass")
def _set_terminator(self, term):
self._terminator = term
def _get_terminator(self):
return self._terminator
#: The input delimiter and the terminating condition to be recognized on the
#: channel. May be any of three types of value, corresponding to three
#: different ways to handle incoming protocol data.
#:
#: +-----------+---------------------------------------------+
#: | term | Description |
#: +===========+=============================================+
#: | *string* | Will call :meth:`found_terminator` when the |
#: | | string is found in the input stream |
#: +-----------+---------------------------------------------+
#: | *integer* | Will call :meth:`found_terminator` when the |
#: | | indicated number of characters have been |
#: | | received |
#: +-----------+---------------------------------------------+
#: | ``None`` | The channel continues to collect data |
#: | | forever |
#: +-----------+---------------------------------------------+
#:
#: Note that any data following the terminator will be available for reading
#: by the channel after :meth:`found_terminator` is called.
terminator = property(_get_terminator, _set_terminator)
def handle_read(self):
    """Read a chunk from the socket and feed it through terminator matching.

    Received data is appended to the internal input buffer, which is then
    repeatedly scanned with the handler matching the current ``terminator``
    type (``None`` / integer / string / list) until no further progress can
    be made (a ``None`` result means a partial terminator prefix is pending).
    """
    try:
        data = self.recv(self.recv_buffer_size)
    except socket.error:
        # ``err`` was previously bound but never used.
        self.handle_error()
        return
    # BUG FIX: the original called ``isinstance()`` with no arguments,
    # raising TypeError whenever ``use_encoding`` was enabled.  Decode raw
    # data to text when the channel works in encoding mode.
    if self.use_encoding and not isinstance(data, str):
        data = data.decode(self.encoding)
    self._input_buffer += data
    while self._input_buffer:
        terminator = self.terminator
        if not terminator:
            handler = self._lookup_none_terminator
        elif isinstance(terminator, (int, long)):
            handler = self._lookup_int_terminator
        elif isinstance(terminator, str):
            handler = self._lookup_str_terminator
        else:
            handler = self._lookup_list_terminator
        res = handler(self.terminator)
        if res is None:
            # Partial terminator prefix at the end of the buffer:
            # wait for more data.
            break
def _lookup_none_terminator(self, terminator):
    """With no terminator the channel just streams: collect everything."""
    self.pull(self._input_buffer)
    self._input_buffer = ''
    return False
def _lookup_int_terminator(self, terminator):
    """Collect exactly ``terminator`` characters, then fire the event.

    Returns ``False`` while not enough data has arrived yet, ``True``
    once the requested amount was consumed and ``found_terminator`` ran.
    """
    buffered = self._input_buffer
    if len(buffered) < terminator:
        self.pull(buffered)
        self._input_buffer = ''
        return False
    self.pull(buffered[:terminator])
    self._input_buffer = buffered[terminator:]
    self.found_terminator()
    return True
def _lookup_list_terminator(self, terminator):
    """Dispatch on the first candidate terminator present in the buffer.

    Falls back to plain collection when none of the candidates occurs.
    """
    for candidate in terminator:
        if self._input_buffer.find(candidate) != -1:
            return self._lookup_str_terminator(candidate)
    return self._lookup_none_terminator(terminator)
def _lookup_str_terminator(self, terminator):
    """Collect data up to a string terminator.

    Three cases are handled:
    1) the terminator occurs in the buffer: collect the payload (with or
       without the terminator depending on ``strip_terminator``), fire
       ``found_terminator`` and return ``True``;
    2) the buffer ends with a proper prefix of the terminator: keep the
       prefix, collect the rest and return ``None`` to wait for more data;
    3) no part of the terminator is present: collect everything, ``False``.
    """
    buffered = self._input_buffer
    term_len = len(terminator)
    found_at = buffered.find(terminator)
    if found_at != -1:
        if self.strip_terminator and found_at > 0:
            self.pull(buffered[:found_at])
        elif not self.strip_terminator:
            self.pull(buffered[:found_at + term_len])
        self._input_buffer = buffered[found_at + term_len:]
        # This does the Right Thing if the terminator is changed inside
        # the callback.
        self.found_terminator()
        return True
    # No full match; check for a dangling terminator prefix at the end.
    prefix_len = find_prefix_at_end(buffered, terminator)
    if prefix_len:
        if prefix_len != len(buffered):
            # Collect everything up to the dangling prefix.
            self.pull(buffered[:-prefix_len])
            self._input_buffer = buffered[-prefix_len:]
        return None
    # Nothing matched at all: plain collection.
    self.pull(buffered)
    self._input_buffer = ''
    return False
def handle_write(self):
    """Socket became writable: push out whatever is queued."""
    self.flush()
def push(self, data):
    """
    Pushes data on to the channel's fifo to ensure its transmission.
    This is all you need to do to have the channel write the data out to
    the network.

    Payloads larger than ``send_buffer_size`` are split into
    buffer-sized slices before being queued.
    """
    chunk_size = self.send_buffer_size
    if len(data) > chunk_size:
        for start in range(0, len(data), chunk_size):
            self.outbox.append(data[start:start + chunk_size])
    else:
        self.outbox.append(data)
    return self.flush()
def push_with_producer(self, producer):
    """Queue a producer object for sending and try to flush immediately."""
    self.outbox.append(producer)
    return self.flush()
def readable(self):
    """Predicate for inclusion in the readable for select()"""
    # The channel is always willing to accept incoming data.
    return True
def writable(self):
    """Predicate for inclusion in the writable for select()"""
    # For nonblocking sockets connect() will not set self.connected flag,
    # due to EINPROGRESS socket error which is actually promise for
    # successful connection.
    return bool(self.outbox or not self.connected)
def close_when_done(self):
    """Automatically close this channel once the outgoing queue is empty.

    A ``None`` item in the outbox is the close sentinel that
    ``_send_chunky`` reacts to.
    """
    self.outbox.append(None)
def flush(self):
    """Sends all data from outgoing queue."""
    while self.connected and self.outbox:
        next_item = self.outbox.popleft()
        self._send_chunky(next_item)
def _send_chunky(self, data):
    """Sends data as chunks sized by ``send_buffer_size`` value.
    Returns ``True`` on success, ``False`` on error and ``None`` on closing
    event.
    """
    if self.use_encoding and not isinstance(data, bytes):
        data = data.encode(self.encoding)
    while True:
        if data is None:
            # ``None`` is the close sentinel queued by close_when_done().
            self.handle_close()
            return
        obs = self.send_buffer_size
        # NOTE(review): ``buffer`` is the Python 2 builtin (a zero-copy
        # slice view); this module appears to be Python-2-only here.
        bdata = buffer(data, 0, obs)
        try:
            num_sent = self.send(bdata)
        except socket.error:
            self.handle_error()
            return False
        if num_sent and num_sent < len(bdata) or obs < len(data):
            # Partial send (or more than one buffer's worth left):
            # loop again with the unsent remainder.
            data = data[num_sent:]
        else:
            return True
def discard_buffers(self):
    """In emergencies this method will discard any data held in the input
    and output buffers."""
    for dropper in (self.discard_input_buffers, self.discard_output_buffers):
        dropper()
def discard_input_buffers(self):
    """Drop the raw input buffer and everything already collected.

    NOTE(review): ``b('')`` presumably comes from a py2/py3 compat helper
    imported at module level - confirm it yields an empty byte string.
    """
    self._input_buffer = b('')
    self.inbox.clear()
def discard_output_buffers(self):
    """Drop everything queued for sending."""
    self.outbox.clear()
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of ``needle`` that
    ``haystack`` ends with, or 0 when there is none."""
    if not needle:
        # Mirrors the original arithmetic (len(needle) - 1) for an
        # empty needle.
        return -1
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    return 0
| Alwnikrotikz/python-astm | astm/asynclib.py | Python | bsd-3-clause | 32,183 | [
"Amber"
] | 017b63522d347a9b9c7192d0b7d71f80bac5ff21b69960ace2a88c7c2bc836fe |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import numpy as np
from CrystalField import CrystalField, Function
from .fitting import islistlike
def makeWorkspace(xArray, yArray, child=True, ws_name='dummy'):
    """
    Create a workspace.
    @param xArray: DataX values
    @param yArray: DataY values
    @param child: if true, the workspace won't appear in the ADS
    @param ws_name: name of the workspace
    @return: the created workspace object (CreateWorkspace output)
    """
    from mantid.api import AlgorithmManager
    alg = AlgorithmManager.createUnmanaged('CreateWorkspace')
    alg.initialize()
    # Child algorithms keep the workspace out of the Analysis Data Service.
    alg.setChild(child)
    alg.setProperty('DataX', xArray)
    alg.setProperty('DataY', yArray)
    alg.setProperty('OutputWorkspace', ws_name)
    alg.execute()
    return alg.getProperty('OutputWorkspace').value
def get_parameters_for_add(cf, new_ion_index):
    """Collect the field parameters of a single-site ``cf`` keyed for
    appending as ion ``new_ion_index`` of a multi-site function."""
    return get_parameters(cf, 'ion{}.'.format(new_ion_index), '')
def get_parameters_for_add_from_multisite(cfms, new_ion_index):
    """Collect the parameters of every ion of ``cfms``, re-indexed so they
    can be appended starting at ``new_ion_index``."""
    params = {}
    for offset in range(len(cfms.Ions)):
        target_prefix = 'ion{}.'.format(new_ion_index + offset)
        source_prefix = 'ion{}.'.format(offset) if cfms._isMultiSite() else ''
        params.update(get_parameters(cfms, target_prefix, source_prefix))
    return params
def get_parameters(crystal_field, ion_prefix, existing_prefix):
    """Map every crystal field parameter (looked up on ``crystal_field``
    with ``existing_prefix``) onto the corresponding ``ion_prefix`` key."""
    return {ion_prefix + bparam: crystal_field[existing_prefix + bparam]
            for bparam in CrystalField.field_parameter_names}
class CrystalFieldMultiSite(object):
def __init__(self, Ions, Symmetries, **kwargs):
    """Create a multi-site crystal field model.

    @param Ions: list of ion names (or a single comma separated string).
    @param Symmetries: point-group symmetry name(s), one per ion.
    @param kwargs: Temperatures plus FWHM or ResolutionModel (mandatory),
        optional Background/BackgroundPeak, abundances, parameters,
        attributes, ties, constraints and fixedParameters, plus any
        remaining crystal field parameter values (e.g. ``ion0.B20=...``).
    """
    from collections import OrderedDict
    self._makeFunction()
    bg_params = {}
    backgroundPeak = kwargs.pop('BackgroundPeak', None)
    if backgroundPeak is not None:
        bg_params['peak'] = backgroundPeak
    background = kwargs.pop('Background', None)
    if background is not None:
        bg_params['background'] = background
    if len(bg_params) > 0:
        self._setBackground(**bg_params)
    self.Ions = Ions
    self.Symmetries = Symmetries
    self._plot_window = {}
    self.chi2 = None
    self._resolutionModel = None
    parameter_dict = kwargs.pop('parameters', None)
    attribute_dict = kwargs.pop('attributes', None)
    ties_dict = kwargs.pop('ties', None)
    constraints_list = kwargs.pop('constraints', None)
    fix_list = kwargs.pop('fixedParameters', None)
    kwargs = self._setMandatoryArguments(kwargs)
    self._abundances = OrderedDict()
    abundances = kwargs.pop('abundances', None)
    self._makeAbundances(abundances)
    self._setRemainingArguments(kwargs)
    self.default_spectrum_size = 200
    if attribute_dict is not None:
        for name, value in attribute_dict.items():
            self.function.setAttributeValue(name, value)
    if parameter_dict is not None:
        for name, value in parameter_dict.items():
            self.function.setParameter(name, value)
    if ties_dict:
        # BUG FIX: this loop previously iterated ``parameter_dict``, so the
        # supplied ties were silently ignored (and it crashed when ties were
        # given without parameters).
        for name, value in ties_dict.items():
            self.function.tie(name, value)
    if constraints_list:
        self.function.addConstraints(','.join(constraints_list))
    if fix_list:
        for param in fix_list:
            self.function.fixParameter(param)
def _setMandatoryArguments(self, kwargs):
    """Consume the mandatory keyword arguments (temperatures plus either
    peak widths or a resolution model) and return the remaining kwargs.

    Accepts both singular and plural spellings of Temperature(s)/FWHM(s).
    Raises RuntimeError when temperatures are given without a width source.
    """
    if 'Temperatures' in kwargs or 'Temperature' in kwargs:
        self.Temperatures = kwargs.pop('Temperatures') if 'Temperatures' in kwargs else kwargs.pop('Temperature')
        if 'FWHM' in kwargs or 'FWHMs' in kwargs:
            self.FWHM = kwargs.pop('FWHMs') if 'FWHMs' in kwargs else kwargs.pop('FWHM')
        elif 'ResolutionModel' in kwargs:
            self.ResolutionModel = kwargs.pop('ResolutionModel')
        else:
            raise RuntimeError("If temperatures are set, must also set FWHM or ResolutionModel")
    return kwargs
def _setRemainingArguments(self, kwargs):
    """Apply optional attributes, then treat any leftover kwargs as
    crystal field parameter values."""
    possible_args = ['ToleranceEnergy', 'ToleranceIntensity', 'NPeaks', 'FWHMVariation', 'FixAllPeaks',
                     'PeakShape', 'PhysicalProperty']
    for attribute in possible_args:
        value = kwargs.pop(attribute, None)
        if value is not None:
            setattr(self, attribute, value)
    for key in kwargs:  # Crystal field parameters remain - must be set last
        self.function.setParameter(key, kwargs[key])
def _isMultiSite(self):
    """True when the model contains more than one ion."""
    number_of_ions = len(self.Ions)
    return number_of_ions > 1
def _makeFunction(self):
    """Create the underlying Mantid CrystalFieldFunction."""
    from mantid.simpleapi import FunctionFactory
    self.function = FunctionFactory.createFunction('CrystalFieldFunction')
def getParameter(self, param):
    """Return the value of the named function parameter.

    BUG FIX: the original computed the value but never returned it, so
    callers always received ``None``.
    """
    return self.function.getParameterValue(param)
def _getSpectrumTwoArgs(self, arg1, arg2):
    """Dispatch helper for ``getSpectrum(a, b)``.

    Interprets the pair as either (spectrum_index, workspace) or
    (workspace, ws_index); exactly one argument must be an int.
    """
    if isinstance(arg1, int):
        i = arg1
        ws = arg2
        ws_index = 0
        # A spectrum index requires a defined (non-negative) temperature.
        if self.Temperatures[i] < 0:
            raise RuntimeError('You must first define a valid temperature for spectrum {}'.format(i))
    elif isinstance(arg2, int):
        i = 0
        ws = arg1
        ws_index = arg2
    else:
        raise TypeError('expected int for one argument in GetSpectrum, got {0} and {1}'.format(
            arg1.__class__.__name__, arg2.__class__.__name__))
    if isinstance(ws, list) or isinstance(ws, np.ndarray):
        ws = self._convertToWS(ws)
    return self._calcSpectrum(i, ws, ws_index)
def calc_xmin_xmax(self, i=0):
    """Estimate an x-range for spectrum ``i`` from the peak positions of
    every ion, each evaluated as a stand-alone C1 crystal field.

    @return: (min, max) over all ions' peak positions.
    """
    peaks = np.array([])
    for idx in range(len(self.Ions)):
        blm = {}
        for bparam in CrystalField.field_parameter_names:
            blm[bparam] = self.function.getParameterValue('ion{}.'.format(idx) + bparam)
        _cft = CrystalField(self.Ions[idx], 'C1', Temperature=self.Temperatures[i], **blm)
        peaks = np.append(peaks, _cft.getPeakList()[0])
    return np.min(peaks), np.max(peaks)
def getSpectrum(self, *args):
    """
    Get a specified spectrum calculated with the current field and peak parameters.
    Alternatively can be called getSpectrum(workspace, ws_index). Spectrum index is assumed zero.
    Examples:
    cf.getSpectrum() # Calculate the first spectrum using automatically generated x-values
    cf.getSpectrum(1) # Calculate the second spectrum using automatically generated x-values
    cf.getSpectrum(1, ws, 5) # Calculate the second spectrum using the x-values from the 6th spectrum
    # in workspace ws.
    cf.getSpectrum(ws) # Calculate the first spectrum using the x-values from the 1st spectrum
    # in workspace ws.
    cf.getSpectrum(ws, 3) # Calculate the first spectrum using the x-values from the 4th spectrum
    # in workspace ws.
    cf.getSpectrum(2, ws) # Calculate the third spectrum using the x-values from the 1st spectrum
    # in workspace ws.
    @return: A tuple of (x, y) arrays
    """
    if len(args) == 3:
        # (spectrum_index, workspace, ws_index)
        if self.Temperatures[args[0]] < 0:
            raise RuntimeError('You must first define a temperature for the spectrum')
        return self._calcSpectrum(args[0], args[1], args[2])
    elif len(args) == 1:
        if isinstance(args[0], int):
            # Spectrum index only: build an x-grid from the peak range.
            x_min, x_max = self.calc_xmin_xmax(args[0])
            xArray = np.linspace(x_min, x_max, self.default_spectrum_size)
            return self._calcSpectrum(args[0], xArray, 0)
        else:
            # A workspace (or x-array): evaluate the first spectrum on it.
            return self._calcSpectrum(0, args[0], 0)
    elif len(args) == 2:
        return self._getSpectrumTwoArgs(*args)
    else:
        # No arguments: first spectrum on an auto-generated x-grid.
        x_min, x_max = self.calc_xmin_xmax()
        xArray = np.linspace(x_min, x_max, self.default_spectrum_size)
        return self._calcSpectrum(0, xArray, 0)
def _convertToWS(self, wksp_list):
    """
    converts a list or numpy array to workspace
    @param wksp_list: A list or ndarray used to make the workspace
    """
    # The input supplies the x-values; y is zero-filled with the same shape.
    xArray = wksp_list
    yArray = np.zeros_like(xArray)
    return makeWorkspace(xArray, yArray)
def _calcSpectrum(self, i, workspace, ws_index):
    """Calculate i-th spectrum.
    @param i: Index of a spectrum or function string
    @param workspace: A workspace / list / ndarray used to evaluate the spectrum function
    @param ws_index: An index of a spectrum in workspace to use.
    @return: A tuple of (x, y) numpy arrays.
    """
    if isinstance(workspace, list) or isinstance(workspace, np.ndarray):
        workspace = self._convertToWS(workspace)
    from mantid.api import AlgorithmManager
    alg = AlgorithmManager.createUnmanaged('EvaluateFunction')
    alg.initialize()
    alg.setChild(True)
    alg.setProperty('Function', self.makeSpectrumFunction(i))
    alg.setProperty("InputWorkspace", workspace)
    alg.setProperty('WorkspaceIndex', ws_index)
    alg.setProperty('OutputWorkspace', 'dummy')
    alg.execute()
    out = alg.getProperty('OutputWorkspace').value
    # Create copies of the x and y because `out` goes out of scope when this method returns
    # and x and y get deallocated.
    # Spectrum 1 of EvaluateFunction's output holds the calculated curve
    # (spectrum 0 is the input data), hence readY(1) with readX(0).
    return np.array(out.readX(0)), np.array(out.readY(1))
def makeSpectrumFunction(self, i=0):
    """Form a definition string for the CrystalFieldSpectrum function.

    @param i: Index of a spectrum. For a single-spectrum function the full
        definition is returned; otherwise the i-th equivalent single-domain
        function is used.
    """
    if self.NumberOfSpectra != 1:
        equivalents = self.function.createEquivalentFunctions()
        return str(equivalents[i])
    return str(self.function)
def _makeAbundances(self, abundances):
    """Populate the ion -> abundance map and tie intensity scalings.

    The most abundant ion keeps a free ``IntensityScaling``; every other
    ion's scaling is tied to it by the abundance ratio.  When no abundances
    are supplied every ion defaults to 1.0 and no ties are added.
    """
    if abundances is not None:
        for ion_index in range(len(self.Ions)):
            self._abundances['ion{}'.format(ion_index)] = abundances[ion_index]
        max_ion = max(self._abundances, key=lambda key: self._abundances[key])
        ties = {}
        for ion in self._abundances.keys():
            # BUG FIX: compare by value, not identity - ``is not`` on str
            # keys only worked by accident of string interning.
            if ion != max_ion:
                factor = self._abundances[ion] / self._abundances[max_ion]
                tie_key = ion + '.IntensityScaling'
                tie_value = str(factor) + '*' + max_ion + ".IntensityScaling"
                ties[tie_key] = tie_value
        self.ties(ties)
    else:
        for ion_index in range(len(self.Ions)):
            self._abundances['ion{}'.format(ion_index)] = 1.0
def update(self, func):
    """
    Update values of the fitting parameters.
    @param func: A IFunction object containing new parameter values.
    """
    # The fitted function simply replaces the current one wholesale.
    self.function = func
def fix(self, *args):
    """Fix each named parameter at its current value."""
    for parameter_name in args:
        self.function.fixParameter(parameter_name)
def ties(self, *args, **kwargs):
    """
    Set ties on the field parameters.
    @param args: Optional dict(s) of ties, merged into the keyword ties.
    @param kwargs: Ties as name=value pairs: name is a parameter name,
        the value is a tie string or a number. For example:
        tie(B20 = 0.1, IB23 = '2*B23')
    """
    for arg in args:
        if isinstance(arg, dict):
            kwargs.update(arg)
        else:
            # BUG FIX: was ``raise TypeError("")`` - an empty message made
            # failures impossible to diagnose.
            raise TypeError('ties() positional arguments must be dicts, '
                            'got {}'.format(arg.__class__.__name__))
    for tie in kwargs:
        self.function.tie(tie, str(kwargs[tie]))
def constraints(self, *args):
    """
    Set constraints for the field parameters.
    @param args: A list of constraints. For example:
        constraints('B00 > 0', '0.1 < B43 < 0.9')
    """
    combined = ','.join(args)
    self.function.addConstraints(combined)
def plot(self, *args):
    """Plot a spectrum. Parameters are the same as in getSpectrum(...) with additional name argument.

    NOTE(review): ``args[0]`` is accessed unconditionally, so calling
    plot() with no arguments raises IndexError - confirm intended.
    """
    from mantidplot import plotSpectrum
    ws_name = args[3] if len(args) == 4 else 'CrystalFieldMultiSite_{}'.format(self.Ions)
    xArray, yArray = self.getSpectrum(*args)
    ws_name += '_{}'.format(args[0])
    if isinstance(args[0], int):
        ws_name += '_{}'.format(args[1])
    # child=False so the workspace lands in the ADS where plotSpectrum
    # can find it.
    makeWorkspace(xArray, yArray, child=False, ws_name=ws_name)
    plotSpectrum(ws_name, 0)
def _setBackground(self, **kwargs):
    """
    Set background function(s).
    Can provide one argument or both. Each argument can be a string or FunctionWrapper object.
    Can also pass two functions as one argument by passing a single string or CompositeFunctionWrapper.
    Examples:
    setBackground(Gaussian())
    setBackground(background=LinearBackground())
    setBackground(peak='name=Gaussian,Height=1', background='name=LinearBackground')
    setBackground(Gaussian(), 'name=LinearBackground')
    setBackground(Gaussian() + LinearBackground())
    setBackground('name=Gaussian,Height=0,PeakCentre=1,Sigma=0;name=LinearBackground,A0=0,A1=0')
    @param peak: A function passed as the peak. Can be a string or FunctionWrapper e.g.
        'name=Gaussian,Height=0,PeakCentre=1,Sigma=0' or Gaussian(PeakCentre=1)
    @param background: A function passed as the background. Can be a string or FunctionWrapper e.g.
        'name=LinearBackground,A0=1' or LinearBackground(A0=1)
    """
    # The 'bg.' prefix addresses the background part of the composite
    # CrystalFieldFunction.
    self._background = Function(self.function, prefix='bg.')
    if len(kwargs) == 2:
        self._setCompositeBackground(kwargs['peak'], kwargs['background'])
    elif len(kwargs) == 1:
        if 'peak' in kwargs.keys():
            self._setSingleBackground(kwargs['peak'], 'peak')
        elif 'background' in kwargs.keys():
            self._setSingleBackground(kwargs['background'], 'background')
        else:
            raise RuntimeError('_setBackground expects peak or background arguments only')
    else:
        raise RuntimeError('_setBackground takes 1 or 2 arguments, got {}'.format(len(kwargs)))
def _setSingleBackground(self, background, property_name):
    """Install a single background/peak function given as a string or a
    FunctionWrapper; a 2-part CompositeFunctionWrapper is split into a
    (peak, background) pair.

    @param background: the function definition (string or wrapper).
    @param property_name: 'peak' or 'background'.
    """
    from mantid.fitfunctions import FunctionWrapper, CompositeFunctionWrapper
    if isinstance(background, str):
        self._setBackgroundUsingString(background, property_name)
    elif isinstance(background, CompositeFunctionWrapper):
        if len(background) == 2:
            peak, background = str(background).split(';')
            self._setCompositeBackground(peak, background)
        else:
            raise ValueError("composite function passed to background must have "
                             "exactly 2 functions, got {}".format(len(background)))
    elif isinstance(background, FunctionWrapper):
        setattr(self._background, property_name, Function(self.function, prefix='bg.'))
        self.function.setAttributeValue('Background', str(background))
    else:
        raise TypeError("background argument(s) must be string or function object(s)")
def _setCompositeBackground(self, peak, background):
    """Install a two-part background: f0 addresses the peak, f1 the
    background inside the composite 'bg.' function."""
    self._background.peak = Function(self.function, prefix='bg.f0.')
    self._background.background = Function(self.function, prefix='bg.f1.')
    self.function.setAttributeValue('Background', '{0};{1}'.format(peak, background))
def _setBackgroundUsingString(self, background, property_name):
    """Install a background given as a function-definition string.

    A ';'-separated string with two parts is treated as peak;background,
    a single-function string is installed under ``property_name``.
    """
    number_of_functions = background.count(';') + 1
    if number_of_functions == 2:
        peak, background = background.split(';')
        self._setCompositeBackground(peak, background)
    elif number_of_functions == 1:
        setattr(self._background, property_name, Function(self.function, prefix='bg.'))
        self.function.setAttributeValue('Background', background)
    else:
        raise ValueError("string passed to background must have exactly 1 or 2 functions, got {}".format(
            number_of_functions))
def _combine_multisite(self, other):
    """Used to add two CrystalFieldMultiSite.

    Concatenates ions, symmetries and abundances; the parameters of
    ``other`` are re-indexed to follow this object's ions.
    """
    ions = self.Ions + other.Ions
    symmetries = self.Symmetries + other.Symmetries
    abundances = list(self._abundances.values()) + list(other._abundances.values())
    params = get_parameters_for_add_from_multisite(self, 0)
    params.update(get_parameters_for_add_from_multisite(other, len(self.Ions)))
    new_cf = CrystalFieldMultiSite(Ions=ions, Symmetries=symmetries, Temperatures=self.Temperatures,
                                   FWHM=self.FWHM, parameters=params, abundances=abundances)
    return new_cf
def __getitem__(self, item):
    """Return the attribute value when ``item`` names a function
    attribute, otherwise the parameter value."""
    if self.function.hasAttribute(item):
        return self.function.getAttributeValue(item)
    else:
        return self.function.getParameterValue(item)
def __setitem__(self, key, value):
    """Set a function parameter value by name."""
    self.function.setParameter(key, value)
def __add__(self, other):
    """Append another site (CrystalField, CrystalFieldSite or
    CrystalFieldMultiSite) and return a new combined model."""
    scale_factor = 1.0
    if hasattr(other, 'abundance'):  # is CrystalFieldSite
        scale_factor = other.abundance
        other = other.crystalField
    elif isinstance(other, CrystalFieldMultiSite):
        return self._combine_multisite(other)
    if not isinstance(other, CrystalField):
        raise TypeError('Unsupported operand type(s) for +: '
                        'CrystalFieldMultiSite and {}'.format(other.__class__.__name__))
    ions = self.Ions + [other.Ion]
    symmetries = self.Symmetries + [other.Symmetry]
    abundances = list(self._abundances.values()) + [scale_factor]
    # Re-key the existing parameters first, then append the new ion's.
    params = get_parameters_for_add_from_multisite(self, 0)
    params.update(get_parameters_for_add(other, len(self.Ions)))
    new_cf = CrystalFieldMultiSite(Ions=ions, Symmetries=symmetries, Temperatures=self.Temperatures,
                                   FWHM=self.FWHM, parameters=params, abundances=abundances)
    return new_cf
def __radd__(self, other):
    """Prepend a single-site CrystalField (or CrystalFieldSite) and
    return a new combined model with the new ion first."""
    scale_factor = 1.0
    if hasattr(other, 'abundance'):  # is CrystalFieldSite
        scale_factor = other.abundance
        other = other.crystalField
    if not isinstance(other, CrystalField):
        raise TypeError('Unsupported operand type(s) for +: '
                        'CrystalFieldMultiSite and {}'.format(other.__class__.__name__))
    ions = [other.Ion] + self.Ions
    symmetries = [other.Symmetry] + self.Symmetries
    abundances = [scale_factor] + list(self._abundances.values())
    # The new ion becomes ion0; existing ions shift up by one.
    params = get_parameters_for_add(other, 0)
    params.update(get_parameters_for_add_from_multisite(self, 1))
    new_cf = CrystalFieldMultiSite(Ions=ions, Symmetries=symmetries, Temperatures=self.Temperatures,
                                   FWHM=self.FWHM, parameters=params, abundances=abundances)
    return new_cf
@property
def background(self):
    """The background helper object (peak/background Function pair)."""
    return self._background
@background.setter
def background(self, value):
    """Accept a CrystalField.Background object, a function wrapper, or a
    plain definition string."""
    if hasattr(value, 'peak') and hasattr(value, 'background'):
        # Input is a CrystalField.Background object
        if value.peak and value.background:
            self._setBackground(peak=str(value.peak.function), background=str(value.background.function))
        elif value.peak:
            self._setBackground(peak=str(value.peak.function))
        else:
            self._setBackground(background=str(value.background.function))
    elif hasattr(value, 'function'):
        self._setBackground(background=str(value.function))
    else:
        self._setBackground(background=value)
    # Need this for a weird python bug: "IndexError: Function index (2) out of range (2)"
    # if user calls print(self.function) after setting background
    _ = self.function.getTies()  # noqa: F841
@property
def Ions(self):
    """List of ion names, parsed from the stored attribute string."""
    string_ions = self.function.getAttributeValue('Ions')
    # Drop the surrounding bracket characters before splitting.
    string_ions = string_ions[1:-1]
    return string_ions.split(",")
@Ions.setter
def Ions(self, value):
    """Accept either a comma separated string or a list of names."""
    if isinstance(value, str):
        self.function.setAttributeValue('Ions', value)
    else:
        self.function.setAttributeValue('Ions', ','.join(value))
@property
def Symmetries(self):
    """List of symmetry names, parsed from the stored attribute string."""
    string_symmetries = self.function.getAttributeValue('Symmetries')
    # Drop the surrounding bracket characters before splitting.
    string_symmetries = string_symmetries[1:-1]
    return string_symmetries.split(",")
@Symmetries.setter
def Symmetries(self, value):
    """Accept either a comma separated string or a list of names."""
    if isinstance(value, str):
        self.function.setAttributeValue('Symmetries', value)
    else:
        self.function.setAttributeValue('Symmetries', ','.join(value))
@property
def ToleranceEnergy(self):
    """Get energy tolerance"""
    return self.function.getAttributeValue('ToleranceEnergy')
@ToleranceEnergy.setter
def ToleranceEnergy(self, value):
    """Set energy tolerance"""
    self.function.setAttributeValue('ToleranceEnergy', float(value))
@property
def ToleranceIntensity(self):
    """Get intensity tolerance"""
    return self.function.getAttributeValue('ToleranceIntensity')
@ToleranceIntensity.setter
def ToleranceIntensity(self, value):
    """Set intensity tolerance"""
    self.function.setAttributeValue('ToleranceIntensity', float(value))
@property
def Temperatures(self):
    """List of dataset temperatures stored on the function."""
    return list(self.function.getAttributeValue("Temperatures"))
@Temperatures.setter
def Temperatures(self, value):
    self.function.setAttributeValue('Temperatures', value)
@property
def Temperature(self):
    """Singular alias for ``Temperatures``."""
    return list(self.function.getAttributeValue("Temperatures"))
@Temperature.setter
def Temperature(self, value):
    # BUG FIX: the setter was declared as ``def Temperatures``, which
    # clobbered the Temperatures property defined above and left
    # Temperature itself without a setter (assignment raised
    # AttributeError).
    self.function.setAttributeValue('Temperatures', value)
@property
def FWHMs(self):
    """Peak widths, one per dataset/temperature."""
    fwhm = self.function.getAttributeValue('FWHMs')
    nDatasets = len(self.Temperatures)
    if len(fwhm) != nDatasets:
        # NOTE(review): replication assumes a single stored value; for any
        # other mismatched length this yields len(fwhm)*nDatasets entries -
        # confirm intended.
        return list(fwhm) * nDatasets
    return list(fwhm)
@FWHMs.setter
def FWHMs(self, value):
    """Broadcast a scalar (or the first list element on a length
    mismatch) to every dataset."""
    if islistlike(value):
        if len(value) != len(self.Temperatures):
            value = [value[0]] * len(self.Temperatures)
    else:
        value = [value] * len(self.Temperatures)
    self.function.setAttributeValue('FWHMs', value)
    # Explicit widths supersede any previously set resolution model.
    self._resolutionModel = None
@property
def FWHM(self):
    """Alias for ``FWHMs``."""
    return self.FWHMs
@FWHM.setter
def FWHM(self, value):
    self.FWHMs = value
@property
def ResolutionModel(self):
    """The resolution model supplying FWHMX/FWHMY tables, if any."""
    return self._resolutionModel
@ResolutionModel.setter
def ResolutionModel(self, value):
    """Accept a ResolutionModel (or anything with a ``model`` attribute)
    and copy its FWHM tables onto the function, per spectrum when there
    is more than one."""
    from .function import ResolutionModel
    if hasattr(value, 'model'):
        self._resolutionModel = value
    else:
        self._resolutionModel = ResolutionModel(value)
    nSpec = len(self.Temperatures)
    if nSpec > 1:
        if not self._resolutionModel.multi or self._resolutionModel.NumberOfSpectra != nSpec:
            raise RuntimeError('Resolution model is expected to have %s functions, found %s' %
                               (nSpec, self._resolutionModel.NumberOfSpectra))
        for i in range(nSpec):
            model = self._resolutionModel.model[i]
            self.function.setAttributeValue('sp%i.FWHMX' % i, model[0])
            self.function.setAttributeValue('sp%i.FWHMY' % i, model[1])
    else:
        model = self._resolutionModel.model
        self.function.setAttributeValue('FWHMX', model[0])
        self.function.setAttributeValue('FWHMY', model[1])
@property
def FWHMVariation(self):
    """Allowed variation of peak widths around the resolution model."""
    return self.function.getAttributeValue('FWHMVariation')
@FWHMVariation.setter
def FWHMVariation(self, value):
    self.function.setAttributeValue('FWHMVariation', float(value))
@property
def FixAllPeaks(self):
    """Whether all peak parameters are fixed during fitting."""
    return self.function.getAttributeValue('FixAllPeaks')
@FixAllPeaks.setter
def FixAllPeaks(self, value):
    self.function.setAttributeValue('FixAllPeaks', value)
@property
def PeakShape(self):
    """Name of the peak function used for the spectra."""
    return self.function.getAttributeValue('PeakShape')
@PeakShape.setter
def PeakShape(self, value):
    self.function.setAttributeValue('PeakShape', value)
@property
def NumberOfSpectra(self):
    """Number of domains (datasets) the function is defined over."""
    return self.function.getNumberDomains()
@property
def NPeaks(self):
    """Maximum number of peaks calculated per spectrum."""
    return self.function.getAttributeValue('NPeaks')
@NPeaks.setter
def NPeaks(self, value):
    self.function.setAttributeValue('NPeaks', value)
| mganeva/mantid | scripts/Inelastic/CrystalField/CrystalFieldMultiSite.py | Python | gpl-3.0 | 25,091 | [
"CRYSTAL",
"Gaussian"
] | 6bbcf219e1ffed37247f16de0babb4eef5990ae75e9a8e348bbcfe934b252feb |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""basic checker for Python code"""
import collections
import itertools
import sys
import re
import six
from six.moves import zip # pylint: disable=redefined-builtin
import astroid
import astroid.bases
import astroid.scoped_nodes
from pylint import checkers
from pylint import exceptions
from pylint import interfaces
from pylint.checkers import utils
from pylint import reporters
from pylint.checkers.utils import get_node_last_lineno
from pylint.reporters.ureports import nodes as reporter_nodes
import pylint.utils as lint_utils
class NamingStyle(object):
    """Base class mapping pylint name categories to validation regexes."""
    # It may seem counterintuitive that single naming style
    # has multiple "accepted" forms of regular expressions,
    # but we need to special-case stuff like dunder names
    # in method names.
    CLASS_NAME_RGX = None
    MOD_NAME_RGX = None
    CONST_NAME_RGX = None
    COMP_VAR_RGX = None
    DEFAULT_NAME_RGX = None
    CLASS_ATTRIBUTE_RGX = None
    @classmethod
    def get_regex(cls, name_type):
        """Return the compiled regex validating names of ``name_type``."""
        return {
            'module': cls.MOD_NAME_RGX,
            'const': cls.CONST_NAME_RGX,
            'class': cls.CLASS_NAME_RGX,
            'function': cls.DEFAULT_NAME_RGX,
            'method': cls.DEFAULT_NAME_RGX,
            'attr': cls.DEFAULT_NAME_RGX,
            'argument': cls.DEFAULT_NAME_RGX,
            'variable': cls.DEFAULT_NAME_RGX,
            'class_attribute': cls.CLASS_ATTRIBUTE_RGX,
            'inlinevar': cls.COMP_VAR_RGX,
        }[name_type]
class SnakeCaseStyle(NamingStyle):
    """``snake_case`` naming convention (dunders exempted where needed)."""
    CLASS_NAME_RGX = re.compile('[a-z_][a-z0-9_]+$')
    MOD_NAME_RGX = re.compile('([a-z_][a-z0-9_]*)$')
    CONST_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|(__.*__))$')
    COMP_VAR_RGX = re.compile('[a-z_][a-z0-9_]*$')
    DEFAULT_NAME_RGX = re.compile('(([a-z_][a-z0-9_]{2,30})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$')
    CLASS_ATTRIBUTE_RGX = re.compile(r'(([a-z_][a-z0-9_]{2,30}|(__.*__)))$')
class CamelCaseStyle(NamingStyle):
    """``camelCase`` naming convention (leading lowercase letter)."""
    CLASS_NAME_RGX = re.compile('[a-z_][a-zA-Z0-9]+$')
    MOD_NAME_RGX = re.compile('([a-z_][a-zA-Z0-9]*)$')
    CONST_NAME_RGX = re.compile('(([a-z_][A-Za-z0-9]*)|(__.*__))$')
    COMP_VAR_RGX = re.compile('[a-z_][A-Za-z0-9]*$')
    DEFAULT_NAME_RGX = re.compile('(([a-z_][a-zA-Z0-9]{2,30})|(__[a-z][a-zA-Z0-9_]+__))$')
    CLASS_ATTRIBUTE_RGX = re.compile(r'([a-z_][A-Za-z0-9]{2,30}|(__.*__))$')
class PascalCaseStyle(NamingStyle):
    """``PascalCase`` naming convention (leading uppercase letter)."""
    CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
    MOD_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
    CONST_NAME_RGX = re.compile('(([A-Z_][A-Za-z0-9]*)|(__.*__))$')
    COMP_VAR_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
    DEFAULT_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$')
    CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$')
class UpperCaseStyle(NamingStyle):
    """``UPPER_CASE`` naming convention."""
    CLASS_NAME_RGX = re.compile('[A-Z_][A-Z0-9_]+$')
    MOD_NAME_RGX = re.compile('[A-Z_][A-Z0-9_]+$')
    CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
    COMP_VAR_RGX = re.compile('[A-Z_][A-Z0-9_]+$')
    DEFAULT_NAME_RGX = re.compile('([A-Z_][A-Z0-9_]{2,30})|(__[a-z][a-zA-Z0-9_]+__)$')
    CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][A-Z0-9_]{2,30}$')
class AnyStyle(NamingStyle):
    """Accept-everything style: every name type matches."""
    @classmethod
    def get_regex(cls, name_type):
        return re.compile('.*')
# Registry of the selectable naming conventions.
NAMING_STYLES = {'snake_case': SnakeCaseStyle, 'camelCase': CamelCaseStyle,
                 'PascalCase': PascalCaseStyle, 'UPPER_CASE': UpperCaseStyle,
                 'any': AnyStyle}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile('^_')
REVERSED_PROTOCOL_METHOD = '__reversed__'
SEQUENCE_PROTOCOL_METHODS = ('__getitem__', '__len__')
# Method sets that make an object usable by reversed().
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS,
                    (REVERSED_PROTOCOL_METHOD, ))
TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==',
                                            '!=', 'in', 'not in'))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = 'unittest.case'
BUILTINS = six.moves.builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
PY33 = sys.version_info >= (3, 3)
PY3K = sys.version_info >= (3, 0)
PY35 = sys.version_info >= (3, 5)
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {'exempt', 'ignore'}
# A mapping from builtin-qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
    zip(['.'.join([BUILTINS, x]) for x in ('set', 'dict', 'list')],
        ['set()', '{}', '[]'])
)
# Mirror image of each comparison operator, for reversed-operand reporting.
REVERSED_COMPS = {'<': '>', '<=': '>=', '>': '<', '>=': '<='}
def _redefines_import(node):
    """ Detect that the given node (AssignName) is inside an
    exception handler and redefines an import from the tryexcept body.
    Returns True if the node redefines an import, False otherwise.
    """
    # Walk up to the enclosing except handler (if any).
    current = node
    while current and not isinstance(current.parent, astroid.ExceptHandler):
        current = current.parent
    # Only handlers for ImportError count (the import-fallback idiom).
    if not current or not utils.error_of_type(current.parent, ImportError):
        return False
    try_block = current.parent.parent
    for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
        for name, alias in import_node.names:
            if alias:
                # ``import x as y`` - the rebinding target is the alias.
                if alias == node.name:
                    return True
            elif name == node.name:
                return True
    return False
def in_loop(node):
    """return True if the node is inside a kind of for loop"""
    loop_types = (astroid.For, astroid.ListComp, astroid.SetComp,
                  astroid.DictComp, astroid.GeneratorExp)
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, loop_types):
            return True
        ancestor = ancestor.parent
    return False
def in_nested_list(nested_list, obj):
    """return true if the object is an element of <nested_list> or of a nested
    list
    """
    return any(
        in_nested_list(element, obj) if isinstance(element, (list, tuple))
        else element == obj
        for element in nested_list
    )
def _get_break_loop_node(break_node):
    """
    Returns the loop node that holds the break node in arguments.
    Args:
        break_node (astroid.Break): the break node of interest.
    Returns:
        astroid.For or astroid.While: the loop node holding the break node.
    """
    loop_nodes = (astroid.For, astroid.While)
    current = break_node.parent
    while True:
        if current is None:
            break
        # The owning loop is the first enclosing loop whose `else:` clause
        # does not contain the break (a break in orelse belongs further up).
        if (isinstance(current, loop_nodes)
                and break_node not in getattr(current, 'orelse', [])):
            break
        current = current.parent
    return current
def _loop_exits_early(loop):
    """
    Returns true if a loop may ends up in a break statement.
    Args:
        loop (astroid.For, astroid.While): the loop node inspected.
    Returns:
        bool: True if the loop may ends up in a break statement, False otherwise.
    """
    loop_nodes = (astroid.For, astroid.While)
    definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
    # Loops nested inside `loop` (their breaks do not exit `loop`).
    inner_loops = [
        inner
        for inner in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
        if inner != loop
    ]
    for break_stmt in loop.nodes_of_class(astroid.Break,
                                          skip_klass=definition_nodes):
        if _get_break_loop_node(break_stmt) not in inner_loops:
            return True
    return False
def _is_multi_naming_match(match, node_type, confidence):
    """Return True when a multi-pattern regex match selects a non-exempt
    name category (methods additionally require reliable inference).
    """
    if match is None or match.lastgroup is None:
        return False
    if match.lastgroup in EXEMPT_NAME_CATEGORIES:
        return False
    # For methods, only trust the match if inference did not fail.
    return node_type != 'method' or confidence != interfaces.INFERENCE_FAILURE
# Fully qualified name of the builtin `property` type; the builtins module
# was renamed from `__builtin__` to `builtins` in Python 3.
if sys.version_info < (3, 0):
    BUILTIN_PROPERTY = '__builtin__.property'
else:
    BUILTIN_PROPERTY = 'builtins.property'
def _get_properties(config):
    """Returns a tuple of property classes and names.
    Property classes are fully qualified, such as 'abc.abstractproperty' and
    property names are the actual names, such as 'abstract_property'.
    """
    property_classes = {BUILTIN_PROPERTY}
    property_names = set()  # Not returning 'property', it has its own check.
    if config is not None:
        for qualified_name in config.property_classes:
            property_classes.add(qualified_name)
            # Keep only the final attribute as the bare property name.
            property_names.add(qualified_name.rsplit('.', 1)[-1])
    return property_classes, property_names
def _determine_function_name_type(node, config=None):
    """Determine the name type whose regex the a function's name should match.
    :param node: A function node.
    :type node: astroid.node_classes.NodeNG
    :param config: Configuration from which to pull additional property classes.
    :type config: :class:`optparse.Values`
    :returns: One of ('function', 'method', 'attr')
    :rtype: str
    """
    property_classes, property_names = _get_properties(config)
    if not node.is_method():
        return 'function'
    decorators = node.decorators.nodes if node.decorators else []
    for decorator in decorators:
        # If the function is a property (decorated with @property
        # or @abc.abstractproperty), the name type is 'attr'.
        looks_like_property = (
            isinstance(decorator, astroid.Name) or
            (isinstance(decorator, astroid.Attribute) and
             decorator.attrname in property_names))
        if looks_like_property:
            infered = utils.safe_infer(decorator)
            if infered and infered.qname() in property_classes:
                return 'attr'
        # If the function is decorated using the prop_method.{setter,getter}
        # form, treat it like an attribute as well.
        elif (isinstance(decorator, astroid.Attribute) and
              decorator.attrname in ('setter', 'deleter')):
            return 'attr'
    return 'method'
def _has_abstract_methods(node):
    """
    Determine if the given `node` has abstract methods.
    The methods should be made abstract by decorating them
    with `abc` decorators.
    """
    unimplemented = utils.unimplemented_abstract_methods(node)
    return len(unimplemented) > 0
def report_by_type_stats(sect, stats, old_stats):
    """make a report of
    * percentage of different types documented
    * percentage of different types with a bad name
    """
    # percentage of different types documented and/or with a bad name
    nice_stats = {}
    for node_type in ('module', 'class', 'method', 'function'):
        try:
            total = stats[node_type]
        except KeyError:
            # No counter collected for this node type: nothing to report.
            raise exceptions.EmptyReportError()
        nice_stats[node_type] = {}
        if total != 0:
            try:
                documented = total - stats['undocumented_'+node_type]
                percent = (documented * 100.) / total
                nice_stats[node_type]['percent_documented'] = '%.2f' % percent
            except KeyError:
                # 'NC' = not computed (the needed counter is missing).
                nice_stats[node_type]['percent_documented'] = 'NC'
            try:
                percent = (stats['badname_'+node_type] * 100.) / total
                nice_stats[node_type]['percent_badname'] = '%.2f' % percent
            except KeyError:
                nice_stats[node_type]['percent_badname'] = 'NC'
    # The table content is a flat tuple: header row first, then one group
    # of `cols` values per row.
    lines = ('type', 'number', 'old number', 'difference',
             '%documented', '%badname')
    for node_type in ('module', 'class', 'method', 'function'):
        new = stats[node_type]
        old = old_stats.get(node_type, None)
        if old is not None:
            diff_str = reporters.diff_string(old, new)
        else:
            old, diff_str = 'NC', 'NC'
        lines += (node_type, str(new), str(old), diff_str,
                  nice_stats[node_type].get('percent_documented', '0'),
                  nice_stats[node_type].get('percent_badname', '0'))
    sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.
    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    for decorator in node.decorators.nodes:
        # A decorator such as `x.setter` where `x` is this function's own
        # name marks a deliberate redefinition.
        if (isinstance(decorator, astroid.Attribute) and
                getattr(decorator.expr, 'name', None) == node.name):
            return True
    return False
class _BasicChecker(checkers.BaseChecker):
    """Common base class for the basic-family checkers in this module."""
    __implements__ = interfaces.IAstroidChecker
    name = 'basic'
class BasicErrorChecker(_BasicChecker):
    """Checker for constructs that are outright errors (or very close to
    being errors): most messages here are E-prefixed.
    """
    msgs = {
        'E0100': ('__init__ method is a generator',
                  'init-is-generator',
                  'Used when the special class method __init__ is turned into a '
                  'generator by a yield in its body.'),
        'E0101': ('Explicit return in __init__',
                  'return-in-init',
                  'Used when the special class method __init__ has an explicit '
                  'return value.'),
        'E0102': ('%s already defined line %s',
                  'function-redefined',
                  'Used when a function / class / method is redefined.'),
        'E0103': ('%r not properly in loop',
                  'not-in-loop',
                  'Used when break or continue keywords are used outside a loop.'),
        'E0104': ('Return outside function',
                  'return-outside-function',
                  'Used when a "return" statement is found outside a function or '
                  'method.'),
        'E0105': ('Yield outside function',
                  'yield-outside-function',
                  'Used when a "yield" statement is found outside a function or '
                  'method.'),
        'E0106': ('Return with argument inside generator',
                  'return-arg-in-generator',
                  'Used when a "return" statement with an argument is found '
                  'outside in a generator function or method (e.g. with some '
                  '"yield" statements).',
                  {'maxversion': (3, 3)}),
        'E0107': ("Use of the non-existent %s operator",
                  'nonexistent-operator',
                  "Used when you attempt to use the C-style pre-increment or"
                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
        'E0108': ('Duplicate argument name %s in function definition',
                  'duplicate-argument-name',
                  'Duplicate argument names in function definitions are syntax'
                  ' errors.'),
        'E0110': ('Abstract class %r with abstract methods instantiated',
                  'abstract-class-instantiated',
                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
                  'has abstract methods and is instantiated.'),
        'W0120': ('Else clause on loop without a break statement',
                  'useless-else-on-loop',
                  'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be on the same scope as the loop itself.'),
        'E0112': ('More than one starred expression in assignment',
                  'too-many-star-expressions',
                  'Emitted when there are more than one starred '
                  'expressions (`*x`) in an assignment. This is a SyntaxError.',
                  {'minversion': (3, 0)}),
        'E0113': ('Starred assignment target must be in a list or tuple',
                  'invalid-star-assignment-target',
                  'Emitted when a star expression is used as a starred '
                  'assignment target.',
                  {'minversion': (3, 0)}),
        'E0114': ('Can use starred expression only in assignment target',
                  'star-needs-assignment-target',
                  'Emitted when a star expression is not used in an '
                  'assignment target.',
                  {'minversion': (3, 0)}),
        'E0115': ('Name %r is nonlocal and global',
                  'nonlocal-and-global',
                  'Emitted when a name is both nonlocal and global.',
                  {'minversion': (3, 0)}),
        'E0116': ("'continue' not supported inside 'finally' clause",
                  'continue-in-finally',
                  'Emitted when the `continue` keyword is found '
                  'inside a finally clause, which is a SyntaxError.'),
        'E0117': ("nonlocal name %s found without binding",
                  'nonlocal-without-binding',
                  'Emitted when a nonlocal variable does not have an attached '
                  'name somewhere in the parent scopes',
                  {'minversion': (3, 0)}),
        'E0118': ("Name %r is used prior to global declaration",
                  'used-prior-global-declaration',
                  'Emitted when a name is used prior a global declaration, '
                  'which results in an error since Python 3.6.',
                  {'minversion': (3, 6)}),
    }

    @utils.check_messages('function-redefined')
    def visit_classdef(self, node):
        """Flag classes that redefine a name already bound in their frame."""
        self._check_redefinition('class', node)

    @utils.check_messages('too-many-star-expressions',
                          'invalid-star-assignment-target')
    def visit_assign(self, node):
        """Check starred-expression constraints on assignment targets."""
        starred = list(node.targets[0].nodes_of_class(astroid.Starred))
        if len(starred) > 1:
            self.add_message('too-many-star-expressions', node=node)
        # Check *a = b
        if isinstance(node.targets[0], astroid.Starred):
            self.add_message('invalid-star-assignment-target', node=node)

    @utils.check_messages('star-needs-assignment-target')
    def visit_starred(self, node):
        """Check that a Starred expression is used in an assignment target."""
        if isinstance(node.parent, astroid.Call):
            # f(*args) is converted to Call(args=[Starred]), so ignore
            # them for this check.
            return
        if PY35 and isinstance(node.parent,
                               (astroid.List, astroid.Tuple,
                                astroid.Set, astroid.Dict)):
            # PEP 448 unpacking.
            return
        stmt = node.statement()
        if not isinstance(stmt, astroid.Assign):
            return
        # Only flag starred expressions on the *value* side of the assignment.
        if stmt.value is node or stmt.value.parent_of(node):
            self.add_message('star-needs-assignment-target', node=node)

    @utils.check_messages('init-is-generator', 'return-in-init',
                          'function-redefined', 'return-arg-in-generator',
                          'duplicate-argument-name', 'nonlocal-and-global',
                          'used-prior-global-declaration')
    def visit_functiondef(self, node):
        """Run function-level error checks: redefinition, __init__
        constraints, generator returns and duplicate argument names.
        """
        self._check_nonlocal_and_global(node)
        self._check_name_used_prior_global(node)
        if (not redefined_by_decorator(node) and
                not utils.is_registered_in_singledispatch_function(node)):
            self._check_redefinition(node.is_method() and 'method' or 'function', node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(astroid.Return,
                                      skip_klass=(astroid.FunctionDef,
                                                  astroid.ClassDef))
        if node.is_method() and node.name == '__init__':
            if node.is_generator():
                self.add_message('init-is-generator', node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if any(v for v in values if not utils.is_none(v)):
                    self.add_message('return-in-init', node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if isinstance(retnode.value, astroid.Const) and \
                            retnode.value.value is not None:
                        self.add_message('return-arg-in-generator', node=node,
                                         line=retnode.fromlineno)
        # Check for duplicate names
        args = set()
        for name in node.argnames():
            if name in args:
                self.add_message('duplicate-argument-name', node=node, args=(name,))
            else:
                args.add(name)

    visit_asyncfunctiondef = visit_functiondef

    def _check_name_used_prior_global(self, node):
        """Emit `used-prior-global-declaration` for names referenced before
        their `global` statement in the same scope.
        """
        # Map each globally-declared name to its Global node in this scope.
        scope_globals = {
            name: child
            for child in node.nodes_of_class(astroid.Global)
            for name in child.names
            if child.scope() is node
        }
        for node_name in node.nodes_of_class(astroid.Name):
            if node_name.scope() is not node:
                continue
            name = node_name.name
            corresponding_global = scope_globals.get(name)
            if not corresponding_global:
                continue
            global_lineno = corresponding_global.fromlineno
            if global_lineno and global_lineno > node_name.fromlineno:
                self.add_message('used-prior-global-declaration',
                                 node=node_name, args=(name, ))

    def _check_nonlocal_and_global(self, node):
        """Check that a name is both nonlocal and global."""
        def same_scope(current):
            return current.scope() is node

        from_iter = itertools.chain.from_iterable
        nonlocals = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Nonlocal)
            if same_scope(child)))
        global_vars = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Global)
            if same_scope(child)))
        for name in nonlocals.intersection(global_vars):
            self.add_message('nonlocal-and-global',
                             args=(name, ), node=node)

    @utils.check_messages('return-outside-function')
    def visit_return(self, node):
        """Flag `return` statements whose frame is not a function."""
        if not isinstance(node.frame(), astroid.FunctionDef):
            self.add_message('return-outside-function', node=node)

    @utils.check_messages('yield-outside-function')
    def visit_yield(self, node):
        """Flag `yield` outside a function or lambda."""
        self._check_yield_outside_func(node)

    @utils.check_messages('yield-outside-function')
    def visit_yieldfrom(self, node):
        """Flag `yield from` outside a function or lambda."""
        self._check_yield_outside_func(node)

    @utils.check_messages('not-in-loop', 'continue-in-finally')
    def visit_continue(self, node):
        """Flag `continue` outside a loop (or inside a finally clause)."""
        self._check_in_loop(node, 'continue')

    @utils.check_messages('not-in-loop')
    def visit_break(self, node):
        """Flag `break` outside a loop."""
        self._check_in_loop(node, 'break')

    @utils.check_messages('useless-else-on-loop')
    def visit_for(self, node):
        """Flag a useless `else:` on a for loop without break."""
        self._check_else_on_loop(node)

    @utils.check_messages('useless-else-on-loop')
    def visit_while(self, node):
        """Flag a useless `else:` on a while loop without break."""
        self._check_else_on_loop(node)

    @utils.check_messages('nonexistent-operator')
    def visit_unaryop(self, node):
        """check use of the non-existent ++ and -- operator operator"""
        # ++x parses as +(+x): a unary op applied to an identical unary op.
        if ((node.op in '+-') and
                isinstance(node.operand, astroid.UnaryOp) and
                (node.operand.op == node.op)):
            self.add_message('nonexistent-operator', node=node, args=node.op*2)

    def _check_nonlocal_without_binding(self, node, name):
        """Emit `nonlocal-without-binding` when *name* has no binding in any
        enclosing function scope.
        """
        current_scope = node.scope()
        while True:
            if current_scope.parent is None:
                # Reached the module scope; handled after the loop.
                break
            if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
                self.add_message('nonlocal-without-binding', args=(name, ),
                                 node=node)
                return
            if name not in current_scope.locals:
                current_scope = current_scope.parent.scope()
                continue
            # Okay, found it.
            return
        if not isinstance(current_scope, astroid.FunctionDef):
            self.add_message('nonlocal-without-binding', args=(name, ), node=node)

    @utils.check_messages('nonlocal-without-binding')
    def visit_nonlocal(self, node):
        """Check every name of a `nonlocal` statement for a binding."""
        for name in node.names:
            self._check_nonlocal_without_binding(node, name)

    @utils.check_messages('abstract-class-instantiated')
    def visit_call(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            for inferred in node.func.infer():
                self._check_inferred_class_is_abstract(inferred, node)
        except astroid.InferenceError:
            return

    def _check_inferred_class_is_abstract(self, infered, node):
        """Emit `abstract-class-instantiated` if *infered* is an abstract
        class (abc.ABCMeta metaclass or abc.ABC ancestor with abstract
        methods) being instantiated at *node*.
        """
        if not isinstance(infered, astroid.ClassDef):
            return
        klass = utils.node_frame_class(node)
        if klass is infered:
            # Don't emit the warning if the class is instantiated
            # in its own body or if the call is not an instance
            # creation. If the class is instantiated into its own
            # body, we're expecting that it knows what it is doing.
            return
        # __init__ was called
        metaclass = infered.metaclass()
        abstract_methods = _has_abstract_methods(infered)
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if ancestor.qname() == 'abc.ABC' and abstract_methods:
                    self.add_message('abstract-class-instantiated',
                                     args=(infered.name, ),
                                     node=node)
                    break
            return
        if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
            self.add_message('abstract-class-instantiated',
                             args=(infered.name, ),
                             node=node)

    def _check_yield_outside_func(self, node):
        """Emit `yield-outside-function` if the yield's frame is not a
        function or lambda.
        """
        if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
            self.add_message('yield-outside-function', node=node)

    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement."""
        if node.orelse and not _loop_exits_early(node):
            self.add_message('useless-else-on-loop', node=node,
                             # This is not optimal, but the line previous
                             # to the first statement in the else clause
                             # will usually be the one that contains the else:.
                             line=node.orelse[0].lineno - 1)

    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop"""
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                if node not in _node.orelse:
                    return
            if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
                break
            # `continue` directly inside a finally clause is a SyntaxError.
            if (isinstance(_node, astroid.TryFinally)
                    and node in _node.finalbody
                    and isinstance(node, astroid.Continue)):
                self.add_message('continue-in-finally', node=node)
            _node = _node.parent
        self.add_message('not-in-loop', node=node, args=node_name)

    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        defined_self = node.parent.frame()[node.name]
        if defined_self is not node and not astroid.are_exclusive(node, defined_self):
            # Names matching the dummy-variables regex are allowed to be
            # redefined (they are placeholders by convention).
            dummy_variables_rgx = lint_utils.get_global_option(
                self, 'dummy-variables-rgx', default=None)
            if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
                return
            self.add_message('function-redefined', node=node,
                             args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
    """checks for :
    * doc strings
    * number of arguments, local variables, branches, returns and statements in
    functions, methods
    * required module attributes
    * dangerous default values as arguments
    * redefinition of function / method / class
    * uses of the global statement
    """
    __implements__ = interfaces.IAstroidChecker
    name = 'basic'
    msgs = {
        'W0101': ('Unreachable code',
                  'unreachable',
                  'Used when there is some code behind a "return" or "raise" '
                  'statement, which will never be accessed.'),
        'W0102': ('Dangerous default value %s as argument',
                  'dangerous-default-value',
                  'Used when a mutable value as list or dictionary is detected in '
                  'a default value for an argument.'),
        'W0104': ('Statement seems to have no effect',
                  'pointless-statement',
                  'Used when a statement doesn\'t have (or at least seems to) '
                  'any effect.'),
        'W0105': ('String statement has no effect',
                  'pointless-string-statement',
                  'Used when a string is used as a statement (which of course '
                  'has no effect). This is a particular case of W0104 with its '
                  'own message so you can easily disable it if you\'re using '
                  'those strings as documentation, instead of comments.'),
        'W0106': ('Expression "%s" is assigned to nothing',
                  'expression-not-assigned',
                  'Used when an expression that is not a function call is assigned '
                  'to nothing. Probably something else was intended.'),
        'W0108': ('Lambda may not be necessary',
                  'unnecessary-lambda',
                  'Used when the body of a lambda expression is a function call '
                  'on the same argument list as the lambda itself; such lambda '
                  'expressions are in all but a few cases replaceable with the '
                  'function being called in the body of the lambda.'),
        'W0109': ("Duplicate key %r in dictionary",
                  'duplicate-key',
                  'Used when a dictionary expression binds the same key multiple '
                  'times.'),
        'W0122': ('Use of exec',
                  'exec-used',
                  'Used when you use the "exec" statement (function for Python '
                  '3), to discourage its usage. That doesn\'t '
                  'mean you cannot use it !'),
        'W0123': ('Use of eval',
                  'eval-used',
                  'Used when you use the "eval" function, to discourage its '
                  'usage. Consider using `ast.literal_eval` for safely evaluating '
                  'strings containing Python expressions '
                  'from untrusted sources. '),
        'W0150': ("%s statement in finally block may swallow exception",
                  'lost-exception',
                  'Used when a break or a return statement is found inside the '
                  'finally clause of a try...finally block: the exceptions raised '
                  'in the try clause will be silently swallowed instead of being '
                  're-raised.'),
        'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
                  'assert-on-tuple',
                  'A call of assert on a tuple will always evaluate to true if '
                  'the tuple is not empty, and will always evaluate to false if '
                  'it is.'),
        'W0124': ('Following "as" with another context manager looks like a tuple.',
                  'confusing-with-statement',
                  'Emitted when a `with` statement component returns multiple values '
                  'and uses name binding with `as` only for a part of those values, '
                  'as in with ctx() as a, b. This can be misleading, since it\'s not '
                  'clear if the context manager returns a tuple or if the node without '
                  'a name binding is another context manager.'),
        'W0125': ('Using a conditional statement with a constant value',
                  'using-constant-test',
                  'Emitted when a conditional statement (If or ternary if) '
                  'uses a constant value for its test. This might not be what '
                  'the user intended to do.'),
        'E0111': ('The first reversed() argument is not a sequence',
                  'bad-reversed-sequence',
                  'Used when the first argument to reversed() builtin '
                  'isn\'t a sequence (does not implement __reversed__, '
                  'nor __getitem__ and __len__'),
    }
    # Extra report registered with the linter (RP0101 statistics table).
    reports = (('RP0101', 'Statistics by type', report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0)
    @utils.check_messages('using-constant-test')
    def visit_if(self, node):
        """Check `if` statements for a constant test expression."""
        self._check_using_constant_test(node, node.test)
    @utils.check_messages('using-constant-test')
    def visit_ifexp(self, node):
        """Check ternary `a if test else b` for a constant test expression."""
        self._check_using_constant_test(node, node.test)
@utils.check_messages('using-constant-test')
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda, astroid.FunctionDef, astroid.ClassDef,
astroid.bases.Generator, astroid.UnboundMethod,
astroid.BoundMethod, astroid.Module)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen. The only type
# of node in this list which doesn't have this property is
# Attribute, which is excepted because the conditional statement
# can be used to verify that the attribute was set inside a class,
# which is definitely a valid use case.
except_nodes = (astroid.Attribute, astroid.Call,
astroid.BinOp, astroid.BoolOp, astroid.UnaryOp,
astroid.Subscript)
inferred = None
emit = isinstance(test, (astroid.Const, ) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit or isinstance(inferred, const_nodes):
self.add_message('using-constant-test', node=node)
    def visit_module(self, _):
        """check module name, docstring and required arguments
        """
        # Only statistics are updated here; the related checks are
        # message-based and live elsewhere.
        self.stats['module'] += 1
    def visit_classdef(self, node): # pylint: disable=unused-argument
        """check module name, docstring and redefinition
        increment branch counter
        """
        # Count the class for the RP0101 statistics report.
        self.stats['class'] += 1
    @utils.check_messages('pointless-statement', 'pointless-string-statement',
                          'expression-not-assigned')
    def visit_expr(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value,
                                                          six.string_types):
            # treat string statement in a separated message
            # Handle PEP-257 attribute docstrings.
            # An attribute docstring is defined as being a string right after
            # an assignment at the module level, class level or __init__ level.
            scope = expr.scope()
            if isinstance(scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)):
                if isinstance(scope, astroid.FunctionDef) and scope.name != '__init__':
                    pass
                else:
                    sibling = expr.previous_sibling()
                    if (sibling is not None and sibling.scope() is scope and
                            isinstance(sibling, astroid.Assign)):
                        # A string right after an assignment in the same scope
                        # is an attribute docstring: do not flag it.
                        return
                self.add_message('pointless-string-statement', node=node)
            return
        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if (isinstance(expr, (astroid.Yield, astroid.Await, astroid.Call)) or
                (isinstance(node.parent, astroid.TryExcept) and
                 node.parent.body == [node])):
            return
        if any(expr.nodes_of_class(astroid.Call)):
            self.add_message('expression-not-assigned', node=node,
                             args=expr.as_string())
        else:
            self.add_message('pointless-statement', node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
    @utils.check_messages('unnecessary-lambda')
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.Call):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        if (isinstance(node.body.func, astroid.Attribute) and
                isinstance(node.body.func.expr, astroid.Call)):
            # Chained call, the intermediate call might
            # return something else (but we don't check that, yet).
            return
        ordinary_args = list(node.args.args)
        new_call_args = list(self._filter_vararg(node, call.args))
        # The **kwargs / *args of the lambda and of the call must match
        # exactly (same forwarded name), otherwise behavior could differ.
        if node.args.kwarg:
            if self._has_variadic_argument(call.kwargs, node.args.kwarg):
                return
        elif call.kwargs or call.keywords:
            return
        if node.args.vararg:
            if self._has_variadic_argument(call.starargs, node.args.vararg):
                return
        elif call.starargs:
            return
        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(new_call_args):
            return
        for arg, passed_arg in zip(ordinary_args, new_call_args):
            if not isinstance(passed_arg, astroid.Name):
                return
            if arg.name != passed_arg.name:
                return
        self.add_message('unnecessary-lambda', line=node.fromlineno,
                         node=node)
@utils.check_messages('dangerous-default-value')
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
# check for dangerous default values as arguments
is_iterable = lambda n: isinstance(n, (astroid.List,
astroid.Set,
astroid.Dict))
for default in node.args.defaults:
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (isinstance(value, astroid.Instance) and
value.qname() in DEFAULT_ARGUMENT_SYMBOLS):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = '%s() (%s)' % (value.name, value.qname())
else:
msg = '%s (%s)' % (default.as_string(), value.qname())
else:
# this argument is a name
msg = '%s (%s)' % (default.as_string(),
DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
self.add_message('dangerous-default-value',
node=node,
args=(msg, ))
    @utils.check_messages('unreachable', 'lost-exception')
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        # An enclosing function definition stops ("breaks") the search.
        self._check_not_in_finally(node, 'return', (astroid.FunctionDef,))
    @utils.check_messages('unreachable')
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some unreachable
        code)
        """
        # `continue` in a finally clause is handled by BasicErrorChecker.
        self._check_unreachable(node)
    @utils.check_messages('unreachable', 'lost-exception')
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        # An enclosing loop stops ("breaks") the search.
        self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))
    @utils.check_messages('unreachable')
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some unreachable
        code)
        """
        # Any statement after a raise in the same suite cannot execute.
        self._check_unreachable(node)
    @utils.check_messages('exec-used')
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        # Python 2 `exec` statement; the exec() *function* is handled
        # in visit_call.
        self.add_message('exec-used', node=node)
@utils.check_messages('eval-used', 'exec-used', 'bad-reversed-sequence')
def visit_call(self, node):
"""visit a Call node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or
name in node.root()):
if name == 'exec':
self.add_message('exec-used', node=node)
elif name == 'reversed':
self._check_reversed(node)
elif name == 'eval':
self.add_message('eval-used', node=node)
@utils.check_messages('assert-on-tuple')
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astroid.Tuple) and \
len(node.test.elts) == 2:
self.add_message('assert-on-tuple', node=node)
@utils.check_messages('duplicate-key')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message('duplicate-key', node=node, args=key)
keys.add(key)
    def visit_tryfinally(self, node):
        """update try...finally flag"""
        # Push on entry so _check_not_in_finally knows we are inside at
        # least one try...finally statement.
        self._tryfinallys.append(node)

    def leave_tryfinally(self, node): # pylint: disable=unused-argument
        """update try...finally flag"""
        # Pop on exit; the stack mirrors the nesting depth.
        self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('unreachable', node=unreach_stmt)
    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.
        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not a in try...finally bloc
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        # Track the child (_node) alongside the parent so we can test
        # membership of the child in the parent's finalbody.
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
                self.add_message('lost-exception', node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent
    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
        except utils.NoSuchArgumentError:
            pass
        else:
            if argument is astroid.YES:
                # Inference was ambiguous: stay silent.
                return
            if argument is None:
                # Nothing was infered.
                # Try to see if we have iter().
                if isinstance(node.args[0], astroid.Call):
                    try:
                        func = next(node.args[0].func.infer())
                    except astroid.InferenceError:
                        return
                    if (getattr(func, 'name', None) == 'iter' and
                            utils.is_builtin_object(func)):
                        # reversed(iter(...)) never works: iterators are
                        # not reversible.
                        self.add_message('bad-reversed-sequence', node=node)
                return
            if isinstance(argument, astroid.Instance):
                if (argument._proxied.name == 'dict' and
                        utils.is_builtin_object(argument._proxied)):
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                elif any(ancestor.name == 'dict' and utils.is_builtin_object(ancestor)
                         for ancestor in argument._proxied.ancestors()):
                    # Mappings aren't accepted by reversed(), unless
                    # they provide explicitly a __reversed__ method.
                    try:
                        argument.locals[REVERSED_PROTOCOL_METHOD]
                    except KeyError:
                        self.add_message('bad-reversed-sequence', node=node)
                    return
                # for/else ladder: break out as soon as one method group is
                # fully present; fall through to the message only when no
                # group of reversal-enabling methods is implemented.
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            break
                    else:
                        break
                else:
                    self.add_message('bad-reversed-sequence', node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message('bad-reversed-sequence', node=node)
    @utils.check_messages('confusing-with-statement')
    def visit_with(self, node):
        # Detect "with a, b:" that was probably meant to be "with a as b:".
        if not PY3K:
            # in Python 2 a "with" statement with multiple managers corresponds
            # to multiple nested AST "With" nodes
            pairs = []
            parent_node = node.parent
            if isinstance(parent_node, astroid.With):
                # we only care about the direct parent, since this method
                # gets called for each with node anyway
                pairs.extend(parent_node.items)
            pairs.extend(node.items)
        else:
            # in PY3K a "with" statement with multiple managers corresponds
            # to one AST "With" node with multiple items
            pairs = node.items
        if pairs:
            # inspect consecutive (manager, alias) pairs
            for prev_pair, pair in zip(pairs, pairs[1:]):
                if (isinstance(prev_pair[1], astroid.AssignName) and
                        (pair[1] is None and not isinstance(pair[0], astroid.Call))):
                    # don't emit a message if the second is a function call
                    # there's no way that can be mistaken for a name assignment
                    if PY3K or node.lineno == node.parent.lineno:
                        # if the line number doesn't match
                        # we assume it's a nested "with"
                        self.add_message('confusing-with-statement', node=node)
# The name "types" pylint knows how to validate.
KNOWN_NAME_TYPES = {
    "module", "const", "class", "function", "method", "attr",
    "argument", "variable", "class_attribute", "inlinevar"
}
# Label used in user-facing messages for each name type.
HUMAN_READABLE_TYPES = {
    'module': 'module',
    'const': 'constant',
    'class': 'class',
    'function': 'function',
    'method': 'method',
    'attr': 'attribute',
    'argument': 'argument',
    'variable': 'variable',
    'class_attribute': 'class attribute',
    'inlinevar': 'inline iteration',
}
# Naming style applied to each name type unless overridden by config.
DEFAULT_NAMING_STYLES = {
    "module": "snake_case",
    "const": "UPPER_CASE",
    "class": "PascalCase",
    "function": "snake_case",
    "method": "snake_case",
    "attr": "snake_case",
    "argument": "snake_case",
    "variable": "snake_case",
    "class_attribute": "any",
    "inlinevar": "any",
}
def _create_naming_options():
    """Build the '<type>-naming-style' and '<type>-rgx' option pairs for
    every known name type."""
    options = []
    for name_type in KNOWN_NAME_TYPES:
        human_readable_name = HUMAN_READABLE_TYPES[name_type]
        default_style = DEFAULT_NAMING_STYLES[name_type]
        # option names use dashes rather than underscores
        option_type = name_type.replace('_', '-')
        style_option = (
            '%s-naming-style' % (option_type,),
            {'default': default_style,
             'type': 'choice', 'choices': list(NAMING_STYLES.keys()), 'metavar': '<style>',
             'help': 'Naming style matching correct %s names' % (human_readable_name,)})
        regex_option = (
            '%s-rgx' % (option_type,),
            {'default': None, 'type': 'regexp', 'metavar': '<regexp>',
             'help': 'Regular expression matching correct %s names. Overrides %s-naming-style'
                     % (human_readable_name, option_type,)})
        options.append(style_option)
        options.append(regex_option)
    return tuple(options)
class NameChecker(_BasicChecker):
    """Checker enforcing naming conventions and rejecting blacklisted names."""
    msgs = {
        'C0102': ('Black listed name "%s"',
                  'blacklisted-name',
                  'Used when the name is listed in the black list (unauthorized '
                  'names).'),
        'C0103': ('%s name "%s" doesn\'t conform to %s',
                  'invalid-name',
                  'Used when the name doesn\'t conform to naming rules '
                  'associated to its type (constant, variable, class...).'),
        'W0111': ('Name %s will become a keyword in Python %s',
                  'assign-to-new-keyword',
                  'Used when assignment will become invalid in future '
                  'Python release due to introducing new keyword'),
    }
    options = (('good-names',
                {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Good variable names which should always be accepted,'
                          ' separated by a comma'}
               ),
               ('bad-names',
                {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Bad variable names which should always be refused, '
                          'separated by a comma'}
               ),
               ('name-group',
                {'default' : (),
                 'type' :'csv', 'metavar' : '<name1:name2>',
                 'help' : ('Colon-delimited sets of names that determine each'
                           ' other\'s naming style when the name regexes'
                           ' allow several styles.')}
               ),
               ('include-naming-hint',
                {'default': False, 'type': 'yn', 'metavar': '<y_or_n>',
                 'help': 'Include a hint for the correct naming format with invalid-name'}
               ),
               ('property-classes',
                {'default': ('abc.abstractproperty',),
                 'type': 'csv',
                 'metavar': '<decorator names>',
                 'help': 'List of decorators that produce properties, such as '
                         'abc.abstractproperty. Add to this list to register '
                         'other decorators that produce valid properties.'}
               ),
              ) + _create_naming_options()
    # Python version in which each of these names first became a keyword.
    KEYWORD_ONSET = {
        (3, 0): {'True', 'False'},
        (3, 7): {'async', 'await'}
    }
    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        self._name_category = {}
        self._name_group = {}
        # name-group -> style-group -> list of deferred warnings (see leave_module)
        self._bad_names = {}
        self._name_regexps = {}
        self._name_hints = {}
    def open(self):
        # Reset per-run statistics and (re)build the naming rules from config.
        self.stats = self.linter.add_stats(badname_module=0,
                                           badname_class=0, badname_function=0,
                                           badname_method=0, badname_attr=0,
                                           badname_const=0,
                                           badname_variable=0,
                                           badname_inlinevar=0,
                                           badname_argument=0,
                                           badname_class_attribute=0)
        for group in self.config.name_group:
            for name_type in group.split(':'):
                self._name_group[name_type] = 'group_%s' % (group,)
        regexps, hints = self._create_naming_rules()
        self._name_regexps = regexps
        self._name_hints = hints
    def _create_naming_rules(self):
        """Build {name_type: regex} and {name_type: hint} from the configuration."""
        regexps = {}
        hints = {}
        for name_type in KNOWN_NAME_TYPES:
            naming_style_option_name = "%s_naming_style" % (name_type,)
            naming_style_name = getattr(self.config, naming_style_option_name)
            regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
            # a user-supplied regex takes precedence over the style's regex
            custom_regex_setting_name = "%s_rgx" % (name_type, )
            custom_regex = getattr(self.config, custom_regex_setting_name, None)
            if custom_regex is not None:
                regexps[name_type] = custom_regex
            if custom_regex is not None:
                hints[name_type] = "%r pattern" % custom_regex.pattern
            else:
                hints[name_type] = "%s naming style" % naming_style_name
        return regexps, hints
    @utils.check_messages('blacklisted-name', 'invalid-name')
    def visit_module(self, node):
        self._check_name('module', node.name.split('.')[-1], node)
        # clear deferred multi-style warnings from the previous module
        self._bad_names = {}
    def leave_module(self, node): # pylint: disable=unused-argument
        # For name groups matched by several styles, only warn about the
        # minority style(s): the most common style is assumed intentional.
        for all_groups in six.itervalues(self._bad_names):
            if len(all_groups) < 2:
                continue
            groups = collections.defaultdict(list)
            min_warnings = sys.maxsize
            for group in six.itervalues(all_groups):
                groups[len(group)].append(group)
                min_warnings = min(len(group), min_warnings)
            if len(groups[min_warnings]) > 1:
                # tie: keep the first group (by line number), warn on the rest
                by_line = sorted(groups[min_warnings],
                                 key=lambda group: min(warning[0].lineno for warning in group))
                warnings = itertools.chain(*by_line[1:])
            else:
                warnings = groups[min_warnings][0]
            for args in warnings:
                self._raise_name_warning(*args)
    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_classdef(self, node):
        self._check_assign_to_new_keyword_violation(node.name, node)
        self._check_name('class', node.name, node)
        for attr, anodes in six.iteritems(node.instance_attrs):
            if not any(node.instance_attr_ancestors(attr)):
                # only check attributes defined in this class, not inherited ones
                self._check_name('attr', attr, anodes[0])
    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_functiondef(self, node):
        # Do not emit any warnings if the method is just an implementation
        # of a base class method.
        self._check_assign_to_new_keyword_violation(node.name, node)
        confidence = interfaces.HIGH
        if node.is_method():
            if utils.overrides_a_method(node.parent.frame(), node.name):
                return
            confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame())
                          else interfaces.INFERENCE_FAILURE)
        self._check_name(_determine_function_name_type(node,
                                                       config=self.config),
                         node.name, node, confidence)
        # Check argument names
        args = node.args.args
        if args is not None:
            self._recursive_check_names(args, node)
    visit_asyncfunctiondef = visit_functiondef
    @utils.check_messages('blacklisted-name', 'invalid-name')
    def visit_global(self, node):
        # names declared global are validated as constants
        for name in node.names:
            self._check_name('const', name, node)
    @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
    def visit_assignname(self, node):
        """check module level assigned names"""
        self._check_assign_to_new_keyword_violation(node.name, node)
        frame = node.frame()
        ass_type = node.assign_type()
        if isinstance(ass_type, astroid.Comprehension):
            self._check_name('inlinevar', node.name, node)
        elif isinstance(frame, astroid.Module):
            if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
                # a module-level name bound to a class object is checked as a class
                if isinstance(utils.safe_infer(ass_type.value), astroid.ClassDef):
                    self._check_name('class', node.name, node)
                else:
                    if not _redefines_import(node):
                        # Don't emit if the name redefines an import
                        # in an ImportError except handler.
                        self._check_name('const', node.name, node)
            elif isinstance(ass_type, astroid.ExceptHandler):
                self._check_name('variable', node.name, node)
        elif isinstance(frame, astroid.FunctionDef):
            # global introduced variable aren't in the function locals
            if node.name in frame and node.name not in frame.argnames():
                if not _redefines_import(node):
                    self._check_name('variable', node.name, node)
        elif isinstance(frame, astroid.ClassDef):
            if not list(frame.local_attr_ancestors(node.name)):
                self._check_name('class_attribute', node.name, node)
    def _recursive_check_names(self, args, node):
        """check names in a possibly recursive list <arg>"""
        for arg in args:
            if isinstance(arg, astroid.AssignName):
                self._check_name('argument', arg.name, node)
            else:
                # tuple-unpacking argument (Python 2): recurse into its elements
                self._recursive_check_names(arg.elts, node)
    def _find_name_group(self, node_type):
        # map a name type to its configured group, defaulting to itself
        return self._name_group.get(node_type, node_type)
    def _raise_name_warning(self, node, node_type, name, confidence):
        type_label = HUMAN_READABLE_TYPES[node_type]
        hint = self._name_hints[node_type]
        if self.config.include_naming_hint:
            hint += " (%r pattern)" % self._name_regexps[node_type].pattern
        args = (
            type_label.capitalize(),
            name,
            hint
        )
        self.add_message('invalid-name', node=node, args=args,
                         confidence=confidence)
        self.stats['badname_' + node_type] += 1
    def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
        """check for a name using the type's regexp"""
        if utils.is_inside_except(node):
            clobbering, _ = utils.clobber_in_except(node)
            if clobbering:
                # the except handler clobbers an outer name; another
                # checker reports that, so skip the naming check here
                return
        if name in self.config.good_names:
            return
        if name in self.config.bad_names:
            self.stats['badname_' + node_type] += 1
            self.add_message('blacklisted-name', node=node, args=name)
            return
        regexp = self._name_regexps[node_type]
        match = regexp.match(name)
        if _is_multi_naming_match(match, node_type, confidence):
            # defer the warning: leave_module decides which style "wins"
            name_group = self._find_name_group(node_type)
            bad_name_group = self._bad_names.setdefault(name_group, {})
            warnings = bad_name_group.setdefault(match.lastgroup, [])
            warnings.append((node, node_type, name, confidence))
        if match is None:
            self._raise_name_warning(node, node_type, name, confidence)
    def _check_assign_to_new_keyword_violation(self, name, node):
        keyword_first_version = self._name_became_keyword_in_version(
            name, self.KEYWORD_ONSET
        )
        if keyword_first_version is not None:
            self.add_message('assign-to-new-keyword',
                             node=node, args=(name, keyword_first_version),
                             confidence=interfaces.HIGH)
    @staticmethod
    def _name_became_keyword_in_version(name, rules):
        # Return "X.Y" for the first version where *name* is a keyword and
        # the running interpreter predates it; None otherwise.
        for version, keywords in rules.items():
            if name in keywords and sys.version_info < version:
                return '.'.join(map(str, version))
        return None
class DocStringChecker(_BasicChecker):
    """Checker warning about missing or empty docstrings."""
    msgs = {
        'C0111': ('Missing %s docstring', # W0131
                  'missing-docstring',
                  'Used when a module, function, class or method has no docstring.'
                  'Some special methods like __init__ doesn\'t necessary require a '
                  'docstring.'),
        'C0112': ('Empty %s docstring', # W0132
                  'empty-docstring',
                  'Used when a module, function, class or method has an empty '
                  'docstring (it would be too easy ;).'),
    }
    options = (('no-docstring-rgx',
                {'default' : NO_REQUIRED_DOC_RGX,
                 'type' : 'regexp', 'metavar' : '<regexp>',
                 'help' : 'Regular expression which should only match '
                          'function or class names that do not require a '
                          'docstring.'}
               ),
               ('docstring-min-length',
                {'default' : -1,
                 'type' : 'int', 'metavar' : '<int>',
                 'help': ('Minimum line length for functions/classes that'
                          ' require docstrings, shorter ones are exempt.')}
               ),
              )
    def open(self):
        # Reset the undocumented-object counters for this run.
        self.stats = self.linter.add_stats(undocumented_module=0,
                                           undocumented_function=0,
                                           undocumented_method=0,
                                           undocumented_class=0)
    @utils.check_messages('missing-docstring', 'empty-docstring')
    def visit_module(self, node):
        self._check_docstring('module', node)
    @utils.check_messages('missing-docstring', 'empty-docstring')
    def visit_classdef(self, node):
        # names matching no-docstring-rgx are exempt
        if self.config.no_docstring_rgx.match(node.name) is None:
            self._check_docstring('class', node)
    @staticmethod
    def _is_setter_or_deleter(node):
        # True for functions decorated with @<prop>.setter / @<prop>.deleter.
        names = {'setter', 'deleter'}
        for decorator in node.decorators.nodes:
            if (isinstance(decorator, astroid.Attribute)
                    and decorator.attrname in names):
                return True
        return False
    @utils.check_messages('missing-docstring', 'empty-docstring')
    def visit_functiondef(self, node):
        if self.config.no_docstring_rgx.match(node.name) is None:
            ftype = 'method' if node.is_method() else 'function'
            if node.decorators and self._is_setter_or_deleter(node):
                # property setters/deleters are documented on the getter
                return
            if isinstance(node.parent.frame(), astroid.ClassDef):
                overridden = False
                confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame())
                              else interfaces.INFERENCE_FAILURE)
                # check if node is from a method overridden by its ancestor
                for ancestor in node.parent.frame().ancestors():
                    if node.name in ancestor and \
                            isinstance(ancestor[node.name], astroid.FunctionDef):
                        overridden = True
                        break
                self._check_docstring(ftype, node,
                                      report_missing=not overridden,
                                      confidence=confidence)
            else:
                self._check_docstring(ftype, node)
    visit_asyncfunctiondef = visit_functiondef
    def _check_docstring(self, node_type, node, report_missing=True,
                         confidence=interfaces.HIGH):
        """check the node has a non empty docstring"""
        docstring = node.doc
        if docstring is None:
            if not report_missing:
                return
            lines = get_node_last_lineno(node) - node.lineno
            if node_type == 'module' and not lines:
                # If the module has no body, there's no reason
                # to require a docstring.
                return
            max_lines = self.config.docstring_min_length
            if node_type != 'module' and max_lines > -1 and lines < max_lines:
                # shorter than docstring-min-length: exempt
                return
            self.stats['undocumented_'+node_type] += 1
            if (node.body and isinstance(node.body[0], astroid.Expr) and
                    isinstance(node.body[0].value, astroid.Call)):
                # Most likely a string with a format call. Let's see.
                func = utils.safe_infer(node.body[0].value.func)
                if (isinstance(func, astroid.BoundMethod)
                        and isinstance(func.bound, astroid.Instance)):
                    # Strings in Python 3, others in Python 2.
                    if PY3K and func.bound.name == 'str':
                        return
                    elif func.bound.name in ('str', 'unicode', 'bytes'):
                        return
            self.add_message('missing-docstring', node=node, args=(node_type,),
                             confidence=confidence)
        elif not docstring.strip():
            # docstring exists but contains only whitespace
            self.stats['undocumented_'+node_type] += 1
            self.add_message('empty-docstring', node=node, args=(node_type,),
                             confidence=confidence)
class PassChecker(_BasicChecker):
    """check if the pass statement is really necessary"""
    msgs = {'W0107': ('Unnecessary pass statement',
                      'unnecessary-pass',
                      'Used when a "pass" statement that can be avoided is '
                      'encountered.'),
           }
    @utils.check_messages('unnecessary-pass')
    def visit_pass(self, node):
        """Flag a ``pass`` that shares its suite with other statements."""
        siblings = node.parent.child_sequence(node)
        if len(siblings) > 1:
            # the suite has other statements, so this 'pass' does nothing
            self.add_message('unnecessary-pass', node=node)
class LambdaForComprehensionChecker(_BasicChecker):
    """check for using a lambda where a comprehension would do.
    See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
    where GvR says comprehensions would be clearer.
    """
    msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
                      'deprecated-lambda',
                      'Used when a lambda is the first argument to "map" or '
                      '"filter". It could be clearer as a list '
                      'comprehension or generator expression.',
                      {'maxversion': (3, 0)}),
           }
    @utils.check_messages('deprecated-lambda')
    def visit_call(self, node):
        """Warn when ``map`` or ``filter`` is called with a lambda as its
        first argument."""
        arguments = node.args
        if not arguments:
            return
        if not isinstance(arguments[0], astroid.Lambda):
            return
        called = utils.safe_infer(node.func)
        # only the builtin map/filter qualify, not user-defined shadows
        if utils.is_builtin_object(called) and called.name in ['map', 'filter']:
            self.add_message('deprecated-lambda', node=node)
def _is_one_arg_pos_call(call):
    """Return True for a call with exactly one, purely positional, argument."""
    if not isinstance(call, astroid.Call):
        return False
    return len(call.args) == 1 and not call.keywords
class ComparisonChecker(_BasicChecker):
    """Checks for comparisons
    - singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
    - yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
      '<=', '>' or '>=', and right can be a variable, an attribute, a method or
      a function
    """
    msgs = {'C0121': ('Comparison to %s should be %s',
                      'singleton-comparison',
                      'Used when an expression is compared to singleton '
                      'values like True, False or None.'),
            'C0122': ('Comparison should be %s',
                      'misplaced-comparison-constant',
                      'Used when the constant is placed on the left side '
                      'of a comparison. It is usually clearer in intent to '
                      'place it in the right hand side of the comparison.'),
            'C0123': ('Using type() instead of isinstance() for a typecheck.',
                      'unidiomatic-typecheck',
                      'The idiomatic way to perform an explicit typecheck in '
                      'Python is to use isinstance(x, Y) rather than '
                      'type(x) == Y, type(x) is Y. Though there are unusual '
                      'situations where these give different results.',
                      {'old_names': [('W0154', 'unidiomatic-typecheck')]}),
            'R0123': ('Comparison to literal',
                      'literal-comparison',
                      'Used when comparing an object to a literal, which is usually '
                      'what you do not want to do, since you can compare to a different '
                      'literal than what was expected altogether.'),
           }
    def _check_singleton_comparison(self, singleton, root_node):
        # Suggest the idiomatic replacement for '== True/False/None'.
        if singleton.value is True:
            suggestion = "just 'expr' or 'expr is True'"
            self.add_message('singleton-comparison',
                             node=root_node,
                             args=(True, suggestion))
        elif singleton.value is False:
            suggestion = "'not expr' or 'expr is False'"
            self.add_message('singleton-comparison',
                             node=root_node,
                             args=(False, suggestion))
        elif singleton.value is None:
            self.add_message('singleton-comparison',
                             node=root_node,
                             args=(None, "'expr is None'"))
    def _check_literal_comparison(self, literal, node):
        """Check if we compare to a literal, which is usually what we do not want to do."""
        nodes = (astroid.List,
                 astroid.Tuple,
                 astroid.Dict,
                 astroid.Set)
        is_other_literal = isinstance(literal, nodes)
        is_const = False
        if isinstance(literal, astroid.Const):
            if literal.value in (True, False, None):
                # Not interested in this values.
                return
            is_const = isinstance(literal.value, (bytes, str, int, float))
        if is_const or is_other_literal:
            self.add_message('literal-comparison', node=node)
    def _check_misplaced_constant(self, node, left, right, operator):
        # 'const OP expr' ("yoda condition"): suggest the flipped form.
        if isinstance(right, astroid.Const):
            # constant on both sides: nothing sensible to suggest
            return
        operator = REVERSED_COMPS.get(operator, operator)
        suggestion = '%s %s %r' % (right.as_string(), operator, left.value)
        self.add_message('misplaced-comparison-constant', node=node,
                         args=(suggestion,))
    @utils.check_messages('singleton-comparison', 'misplaced-comparison-constant',
                          'unidiomatic-typecheck', 'literal-comparison')
    def visit_compare(self, node):
        self._check_unidiomatic_typecheck(node)
        # NOTE: this checker only works with binary comparisons like 'x == 42'
        # but not 'x == y == 42'
        if len(node.ops) != 1:
            return
        left = node.left
        operator, right = node.ops[0]
        if (operator in ('<', '<=', '>', '>=', '!=', '==')
                and isinstance(left, astroid.Const)):
            self._check_misplaced_constant(node, left, right, operator)
        if operator == '==':
            if isinstance(left, astroid.Const):
                self._check_singleton_comparison(left, node)
            elif isinstance(right, astroid.Const):
                self._check_singleton_comparison(right, node)
        if operator in ('is', 'is not'):
            self._check_literal_comparison(right, node)
    def _check_unidiomatic_typecheck(self, node):
        # Only the first comparison of the chain is inspected here.
        operator, right = node.ops[0]
        if operator in TYPECHECK_COMPARISON_OPERATORS:
            left = node.left
            if _is_one_arg_pos_call(left):
                self._check_type_x_is_y(node, left, operator, right)
    def _check_type_x_is_y(self, node, left, operator, right):
        """Check for expressions like type(x) == Y."""
        left_func = utils.safe_infer(left.func)
        if not (isinstance(left_func, astroid.ClassDef)
                and left_func.qname() == TYPE_QNAME):
            return
        if operator in ('is', 'is not') and _is_one_arg_pos_call(right):
            right_func = utils.safe_infer(right.func)
            if (isinstance(right_func, astroid.ClassDef)
                    and right_func.qname() == TYPE_QNAME):
                # type(x) == type(a)
                right_arg = utils.safe_infer(right.args[0])
                if not isinstance(right_arg, LITERAL_NODE_TYPES):
                    # not e.g. type(x) == type([])
                    return
        self.add_message('unidiomatic-typecheck', node=node)
def register(linter):
    """required method to auto register this checker"""
    # registration order matters for message precedence, so keep it stable
    checker_classes = (BasicErrorChecker, BasicChecker, NameChecker,
                       DocStringChecker, PassChecker,
                       LambdaForComprehensionChecker, ComparisonChecker)
    for checker_class in checker_classes:
        linter.register_checker(checker_class(linter))
| lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pylint/checkers/base.py | Python | mit | 78,705 | [
"VisIt"
] | e2c39f726b2a242d137da94f8d56f3623d77bed0ad23085dd3bd34b0e3da76bc |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
A simple example to run CASCI calculation.
'''
import pyscf
# NOTE(review): in PySCF, spin = N_alpha - N_beta (number of unpaired
# electrons), so spin=2 selects the triplet state of O2.
mol = pyscf.M(
    atom = 'O 0 0 0; O 0 0 1.2',
    basis = 'ccpvdz',
    spin = 2)
myhf = mol.RHF().run()
# CASCI active space: 6 orbitals, 8 electrons
mycas = myhf.CASCI(6, 8).run()
#
# Note this mycas object can also be created using the APIs of mcscf module:
#
# from pyscf import mcscf
# mycas = mcscf.CASCI(myhf, 6, 8).run()
# Natural occupancy in CAS space, Mulliken population etc.
mycas.verbose = 4
mycas.analyze()
#
# By default, the output of analyze() method has 6 parts.
#
# First two parts are the natural orbital analysis of the active space. The
# natural orbitals by default were expanded on the "meta-Lowdin" atomic
# orbitals. Meta-Lowdin AO is one type of orthogonal orbital, which largely
# keeps the atomic nature of the core and valence space. The character of each
# orbital can be roughly read based on the square of the coefficients.
#
# Natural occ [1.98127707 1.95671369 1.95671369 1.04270854 1.04270854 0.01987847]
# Natural orbital (expansion on meta-Lowdin AOs) in CAS space
#                #1        #2        #3        #4        #5
#   0 O 1s       0.00063   0.00000   0.00000  -0.00000   0.00000
#   0 O 2s       0.30447   0.00000  -0.00000   0.00000  -0.00000
#   0 O 3s       0.04894  -0.00000  -0.00000  -0.00000   0.00000
#   0 O 2px     -0.00000   0.05038   0.70413  -0.70572   0.04213
#   0 O 2py     -0.00000   0.70413  -0.05038  -0.04213  -0.70572
#   0 O 2pz     -0.63298  -0.00000  -0.00000   0.00000   0.00000
# ...
#
# Next part prints the overlap between the canonical MCSCF orbitals and the
# HF orbitals of the initial guess. It can be used to measure how close the
# initial guess and the MCSCF results are.
# ...
# <mo_coeff-mcscf|mo_coeff-hf>  12  12    0.60371478
# <mo_coeff-mcscf|mo_coeff-hf>  12  13    0.79720035
# <mo_coeff-mcscf|mo_coeff-hf>  13  12    0.79720035
# <mo_coeff-mcscf|mo_coeff-hf>  13  13   -0.60371478
# <mo_coeff-mcscf|mo_coeff-hf>  14  14    0.99998785
# <mo_coeff-mcscf|mo_coeff-hf>  15  15   -0.61646818
# ...
#
# The next section is the analysis of the CI coefficients. This part is not
# available for external FCI solvers (such as DMRG, QMC).
#
# ** Largest CI components **
#   [alpha occ-orbitals] [beta occ-orbitals]            CI coefficient
#   [0 1 2 3 4]          [0 1 2]                        0.973574063441
#   [0 1 2 3 4]          [0 3 4]                        -0.187737433798
#
# The last two parts of the output are the Mulliken population analysis. To
# obtain better transferability, the electron population was computed based on
# meta-Lowdin orthogonal orbitals (rather than the input raw basis, which may
# not possess AO character).
#
# ** Mulliken pop on meta-lowdin orthogonal AOs **
# ** Mulliken pop **
# pop of  0 O 1s         1.99999
# pop of  0 O 2s         1.78300
# pop of  0 O 3s         0.00789
# pop of  0 O 2px        1.49626
# pop of  0 O 2py        1.49626
# pop of  0 O 2pz        1.19312
# ...
| sunqm/pyscf | examples/mcscf/00-simple_casci.py | Python | apache-2.0 | 3,014 | [
"PySCF"
] | a792b775e367e5ed9651a6522064cfc0fe8e0725c64297c895da1950234991b6 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence
* Without a direct way to compute N steps forward, the
semantics of jumpahead(n) are weakened to simply jump
to another distant state and rely on the large period
to avoid overlapping sequences.
* The random() method is implemented in C, executes in
a single Python step, and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
    """Random number generator base class used by bound module functions.
    Used to instantiate instances of Random to get generators that don't
    share state.  Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread, and using the jumpahead()
    method to ensure that the generated sequences seen by each thread don't
    overlap.
    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), setstate() and jumpahead().
    Optionally, implement a getrandombits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    VERSION = 2     # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.
        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        # cache slot for the spare gaussian produced by gauss()
        self.gauss_next = None
    def seed(self, a=None):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        If a is not None or an int or long, hash(a) is used instead.
        """
        if a is None:
            try:
                # prefer 128 bits of OS entropy when the platform provides it
                a = long(_hexlify(_urandom(16)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds
        super(Random, self).seed(a)
        # reseeding invalidates any cached gaussian value
        self.gauss_next = None
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        # (format version, core generator state, cached gaussian)
        return self.VERSION, super(Random, self).getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 2:
            version, internalstate, self.gauss_next = state
            super(Random, self).setstate(internalstate)
        else:
            # reject states produced by an incompatible getstate() format
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
    ## ---- Methods below this point do not need to be overridden when
    ## ---- subclassing for the purpose of using a different core generator.
    ## -------------------- pickle support  -------------------
    def __getstate__(self): # for pickle
        return self.getstate()
    def __setstate__(self, state):  # for pickle
        self.setstate(state)
    def __reduce__(self):
        # recreate via class() then restore the state from getstate()
        return self.__class__, (), self.getstate()
    ## -------------------- integer methods  -------------------
    def randrange(self, start, stop=None, step=1, int=int, default=None,
                  maxwidth=1L<<BPF):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        Do not supply the 'int', 'default', and 'maxwidth' arguments.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = int(start)
        if istart != start:
            raise ValueError, "non-integer arg 1 for randrange()"
        if stop is default:
            # one-argument form: randrange(stop)
            if istart > 0:
                if istart >= maxwidth:
                    # too wide for a single random() call; use _randbelow
                    return self._randbelow(istart)
                return int(self.random() * istart)
            raise ValueError, "empty range for randrange()"
        # stop argument supplied.
        istop = int(stop)
        if istop != stop:
            raise ValueError, "non-integer stop for randrange()"
        width = istop - istart
        if step == 1 and width > 0:
            # Note that
            #     int(istart + self.random()*width)
            # instead would be incorrect.  For example, consider istart
            # = -2 and istop = 0.  Then the guts would be in
            # -2.0 to 0.0 exclusive on both ends (ignoring that random()
            # might return 0.0), and because int() truncates toward 0, the
            # final result would be -1 or 0 (instead of -2 or -1).
            #     istart + int(self.random()*width)
            # would also be incorrect, for a subtler reason:  the RHS
            # can return a long, and then randrange() would also return
            # a long, but we're supposed to return an int (for backward
            # compatibility).
            if width >= maxwidth:
                return int(istart + self._randbelow(width))
            return int(istart + int(self.random()*width))
        if step == 1:
            raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
        # Non-unit step argument supplied.
        istep = int(step)
        if istep != step:
            raise ValueError, "non-integer step for randrange()"
        # n = number of items in range(istart, istop, istep)
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError, "zero step for randrange()"
        if n <= 0:
            raise ValueError, "empty range for randrange()"
        if n >= maxwidth:
            return istart + self._randbelow(n)
        return istart + istep*int(self.random() * n)
    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """
        # delegate to randrange, shifting to an inclusive upper bound
        return self.randrange(a, b+1)
    def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
                   _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
        """Return a random int in the range [0,n)
        Handles the case where n has more bits than returned
        by a single call to the underlying generator.
        """
        try:
            getrandbits = self.getrandbits
        except AttributeError:
            # core generator offers no getrandbits(); fall back below
            pass
        else:
            # Only call self.getrandbits if the original random() builtin method
            # has not been overridden or if a new getrandbits() was supplied.
            # This assures that the two methods correspond.
            if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
                k = int(1.00001 + _log(n-1, 2.0))   # 2**k > n-1 > 2**(k-2)
                r = getrandbits(k)
                # rejection sampling keeps the result uniformly distributed
                while r >= n:
                    r = getrandbits(k)
                return r
        if n >= _maxwidth:
            # a single random() call cannot cover this range uniformly
            _warn("Underlying random() generator does not supply \n"
                  "enough bits to choose from a population range this large")
        return int(self.random() * n)
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
    def shuffle(self, x, random=None, int=int):
        """x, random=random.random -> shuffle list x in place; return None.
        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.
        Note that for even rather small len(x), the total number of
        permutations of x is larger than the period of most random number
        generators; this implies that "most" permutations of a long
        sequence can never be generated.
        """
        # Fisher-Yates shuffle, walking from the tail of the list downward;
        # each position is swapped with a uniformly chosen earlier (or same)
        # position, giving a uniform permutation.
        if random is None:
            random = self.random
        for i in reversed(xrange(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = int(random() * (i+1))
            x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence.
        Returns a new list containing elements from the population while
        leaving the original population unchanged. The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples. This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique. If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use xrange as an argument.
        This is especially fast and space efficient for sampling from a
        large population: sample(xrange(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a
        # dictionary.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small dictionary and an occasional reselection. For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # dictionary and it doesn't suffer from frequent reselections.
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError, "sample larger than population"
        random = self.random
        _int = int
        result = [None] * k
        if n < 6 * k: # if n len list takes less space than a k len dict
            pool = list(population)
            for i in xrange(k): # invariant: non-selected at [0,n-i)
                j = _int(random() * (n-i))
                result[i] = pool[j]
                pool[j] = pool[n-i-1] # move non-selected item into vacancy
        else:
            try:
                # Probe random access; non-indexable inputs (sets, dicts)
                # raise and are converted to a tuple below.
                n > 0 and (population[0], population[n//2], population[n-1])
            except (TypeError, KeyError): # handle sets and dictionaries
                population = tuple(population)
            selected = {}
            for i in xrange(k):
                j = _int(random() * n)
                # Redraw on collision so every selected index is distinct.
                while j in selected:
                    j = _int(random() * n)
                result[i] = selected[j] = population[j]
        return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"""Get a random number in the range [a, b)."""
return a + (b-a) * self.random()
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. (The parameter would be
called "lambda", but that is a reserved word in Python.) Returned
values range from 0 to positive infinity.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
random = self.random
u = random()
while u <= 1e-7:
u = random()
return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero. If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu: mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            # Degenerate case: concentration ~0 means a uniform angle.
            return TWOPI * random()
        # Set-up constants for the rejection sampler (Fisher, steps 0-1).
        a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
        b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
        r = (1.0 + b * b)/(2.0 * b)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            f = (1.0 + r * z)/(r + z)
            c = kappa * (r - f)
            u2 = random()
            # Acceptance test (steps 3-4); the second clause is the
            # exact (slower) check used when the quick one fails.
            if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
                break
        # u3 chooses, with equal probability, which side of the mean
        # angle the accepted deviate falls on.
        u3 = random()
        if u3 > 0.5:
            theta = (mu % TWOPI) + _acos(f)
        else:
            theta = (mu % TWOPI) - _acos(f)
        return theta
## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution. Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
        random = self.random
        # Three regimes, chosen by the shape parameter alpha; beta only
        # rescales the result at the end of each branch.
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                # Keep u1 away from 0 and 1 so the logit below is finite.
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                # Quick acceptance test first, exact log test as fallback.
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else: # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                # Acceptance test differs by which branch produced x.
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > -1 and beta} > -1.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
    """Random subclass implementing the Wichmann-Hill (AS 183) generator,
    built from three small linear-congruential generators combined mod 1.
    """
    VERSION = 1 # used by getstate/setstate
    def seed(self, a=None):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        If a is not None or an int or long, hash(a) is used instead.
        If a is an int or long, a is used directly. Distinct values between
        0 and 27814431486575L inclusive are guaranteed to yield distinct
        internal states (this guarantee is specific to the default
        Wichmann-Hill generator).
        """
        if a is None:
            try:
                # Prefer 128 bits of OS entropy when available.
                a = long(_hexlify(_urandom(16)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds
        if not isinstance(a, (int, long)):
            a = hash(a)
        # Split the seed into three sub-generator seeds; +1 keeps each
        # component nonzero (a zero component would stay zero forever).
        a, x = divmod(a, 30268)
        a, y = divmod(a, 30306)
        a, z = divmod(a, 30322)
        self._seed = int(x)+1, int(y)+1, int(z)+1
        self.gauss_next = None
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Wichman-Hill random number generator.
        #
        # Wichmann, B. A. & Hill, I. D. (1982)
        # Algorithm AS 183:
        # An efficient and portable pseudo-random number generator
        # Applied Statistics 31 (1982) 188-190
        #
        # see also:
        # Correction to Algorithm AS 183
        # Applied Statistics 33 (1984) 123
        #
        # McLeod, A. I. (1985)
        # A remark on Algorithm AS 183
        # Applied Statistics 34 (1985),198-200
        # This part is thread-unsafe:
        # BEGIN CRITICAL SECTION
        x, y, z = self._seed
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        self._seed = x, y, z
        # END CRITICAL SECTION
        # Note: on a platform using IEEE-754 double arithmetic, this can
        # never return 0.0 (asserted by Tim; proof too long for a comment).
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, self._seed, self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 1:
            version, self._seed, self.gauss_next = state
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
    def jumpahead(self, n):
        """Act as if n calls to random() were made, but quickly.
        n is an int, greater than or equal to 0.
        Example use: If you have 2 threads and know that each will
        consume no more than a million random numbers, create two Random
        objects r1 and r2, then do
        r2.setstate(r1.getstate())
        r2.jumpahead(1000000)
        Then r1 and r2 will use guaranteed-disjoint segments of the full
        period.
        """
        if not n >= 0:
            raise ValueError("n must be >= 0")
        # Each random() call multiplies a component by its constant mod
        # its modulus, so n steps equal one multiply by constant**n
        # (computed cheaply with three-argument pow).
        x, y, z = self._seed
        x = int(x * pow(171, n, 30269)) % 30269
        y = int(y * pow(172, n, 30307)) % 30307
        z = int(z * pow(170, n, 30323)) % 30323
        self._seed = x, y, z
    def __whseed(self, x=0, y=0, z=0):
        """Set the Wichmann-Hill seed from (x, y, z).
        These must be integers in the range [0, 256).
        """
        if not type(x) == type(y) == type(z) == int:
            raise TypeError('seeds must be integers')
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError('seeds must be in range(0, 256)')
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)
        self.gauss_next = None
    def whseed(self, a=None):
        """Seed from hashable object's hash code.
        None or no argument seeds from current time. It is not guaranteed
        that objects with distinct hash codes lead to distinct internal
        states.
        This is obsolete, provided for compatibility with the seed routine
        used prior to Python 2.1. Use the .seed() method instead.
        """
        if a is None:
            self.__whseed()
            return
        a = hash(a)
        a, x = divmod(a, 256)
        a, y = divmod(a, 256)
        a, z = divmod(a, 256)
        # Fold the remaining high bits of the hash back into each
        # component; substitute 1 for zero as above.
        x = (x + a) % 256 or 1
        y = (y + a) % 256 or 1
        z = (z + a) % 256 or 1
        self.__whseed(x, y, z)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).
    Not available on all systems (see os.urandom() for details).
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Take 7 random bytes (56 bits), discard the low 3 bits to keep
        # BPF=53 significant bits (a C double's mantissa), then scale by
        # 2**-53 (RECIP_BPF) into [0.0, 1.0).
        return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x. Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        bytes = (k + 7) // 8 # bits / 8 and rounded up
        x = long(_hexlify(_urandom(bytes)), 16)
        return x >> (bytes * 8 - k) # trim excess bits
    def _stub(self, *args, **kwds):
        "Stub method. Not used for a system random number generator."
        return None
    # The OS entropy source cannot be seeded or jumped; accept and ignore.
    seed = jumpahead = _stub
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    # There is no reproducible internal state to save or restore.
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
    """Call func(*args) n times and print elapsed time plus summary
    statistics (mean, standard deviation, min, max) of the results."""
    import time
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    # Population standard deviation via E[x^2] - E[x]^2.
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
          (avg, stddev, smallest, largest)
def _test(N=2000):
    """Smoke-test every distribution with N draws each, printing the
    timing/summary line produced by _test_generator for each one."""
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    # Sweep gammavariate across small, unit and large shape parameters,
    # covering all three branches of its implementation.
    gamma_params = [(0.01, 1.0), (0.1, 1.0), (0.1, 2.0),
                    (0.5, 1.0), (0.9, 1.0), (1.0, 1.0),
                    (2.0, 1.0), (20.0, 1.0), (200.0, 1.0)]
    for params in gamma_params:
        _test_generator(N, gammavariate, params)
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
# One shared, time-seeded generator backs the whole module-level API;
# the names below are its bound methods.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits
# Run the statistical smoke test when executed as a script.
if __name__ == '__main__':
    _test()
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/random.py | Python | gpl-2.0 | 29,814 | [
"Gaussian"
] | 8e16a071fb83b96a4a186a8c29a477eee2273c4dfcb14966f792e2e8579c4674 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# 2015-04-13T21:56+08:00
import json
import time
# Build a sample blog-entry record that deliberately mixes in datatypes
# the json module cannot serialize natively (bytes, time.struct_time).
entry = {}
entry['title'] = 'Dive into history, 2009 edition'
entry['article_link'] = 'http://diveintomark.org/archives/2009/03/27/dive-into-history-2009'
entry['comments_link'] = None
entry['internal_id'] = b'\xDE\xD5\xB4\xF8'
entry['tags'] = ('diveintopython', 'docbook', 'html')
entry['published'] = True
entry['published_date'] = time.strptime('Fri Mar 27 22:20:42 2009')
# Even if JSON has no built-in support for bytes, that doesn't mean you can't
# serialize bytes objects. The json module provides extensibility hooks for
# encoding and decoding unknown datatypes.
# If you want to encode bytes or other datatypes that JSON doesn't support
# natively, you need to provide custom encoders and decoders for those types.
try:
    with open('entry.json', 'w', encoding = 'utf-8') as f:
        # Expected to fail: entry holds bytes and a time.struct_time,
        # and no custom encoder has been supplied yet.
        json.dump(entry, f)
except Exception as e:
    print(e)
# To define your own “mini-serialization format” for a datatype that JSON
# doesn’t support natively, just define a function that takes a Python object
# as a parameter. This Python object will be the actual object that the
# json.dump() function is unable to serialize by itself.
# Your custom serialization function should check the type of the Python object
# that the json.dump() function passed to it. This is not strictly necessary if
# your function only serializes one datatype, but it makes it crystal clear what
# case your function is covering, and it makes it easier to extend if you need to
# add serializations for more datatypes later.
def to_json(python_object):
    """Serialization hook for json.dump(default=...).

    Receives each object the JSON encoder cannot handle natively and
    returns a JSON-compatible {'__class__': ..., '__value__': ...}
    wrapper for the types this tutorial supports; raises TypeError for
    anything else, as the json protocol requires.
    """
    if isinstance(python_object, time.struct_time):
        # Represent timestamps by their asctime() string form.
        return {'__class__': 'time.asctime',
                '__value__': time.asctime(python_object)}
    if isinstance(python_object, bytes):
        # Represent raw bytes as a list of integer byte values.
        return {'__class__': 'bytes',
                '__value__': list(python_object)}
    # Unrecognized type: raising TypeError tells json.dump() this hook
    # could not serialize the object either.
    raise TypeError(repr(python_object) + ' is not JSON serializable')
# With the to_json hook supplied via default=, the dump now succeeds:
# unsupported types are written as their wrapper dictionaries.
with open('entry.json', 'w', encoding = 'utf-8') as f:
    json.dump(entry, f, default = to_json, indent = 2)
| myd7349/DiveIntoPython3Practices | chapter_13_SerializingPythonObjects/serializing_datatypes_unsupported_by_json.py | Python | lgpl-3.0 | 2,466 | [
"CRYSTAL"
] | 0d33c9a022f19591ce6cfc30cdaa4a9b0951f8c075654e57fbd531cfd6a7e990 |
"""
A simple VTK widget for PyQt or PySide.
See http://www.trolltech.com for Qt documentation,
http://www.riverbankcomputing.co.uk for PyQt, and
http://pyside.github.io for PySide.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Greg Schussman, Aug. 2014
The keyPressEvent function now passes keysym instead of None.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 to PyQt5.
Changes by Fabian Wenzel, Jan. 2016
Support for Python3
"""
# Check whether a specific PyQt implementation was chosen.
# Bug fix: PyQtImpl must be pre-initialized -- if "import vtk.qt" raises
# ImportError below, the name would otherwise be unbound and the
# "if PyQtImpl is None" test would raise NameError instead of falling
# back to autodetection.
PyQtImpl = None
try:
    import vtk.qt
    PyQtImpl = vtk.qt.PyQtImpl
except ImportError:
    pass
if PyQtImpl is None:
    # Autodetect the PyQt implementation to use, newest binding first.
    try:
        import PyQt5
        PyQtImpl = "PyQt5"
    except ImportError:
        try:
            import PyQt4
            PyQtImpl = "PyQt4"
        except ImportError:
            try:
                import PySide
                PyQtImpl = "PySide"
            except ImportError:
                raise ImportError("Cannot load either PyQt or PySide")
# Import the Qt names this module needs from whichever binding was found.
if PyQtImpl == "PyQt5":
    from PyQt5.QtWidgets import QWidget
    from PyQt5.QtWidgets import QSizePolicy
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import Qt
    from PyQt5.QtCore import QTimer
    from PyQt5.QtCore import QObject
    from PyQt5.QtCore import QSize
    from PyQt5.QtCore import QEvent
elif PyQtImpl == "PyQt4":
    from PyQt4.QtGui import QWidget
    from PyQt4.QtGui import QSizePolicy
    from PyQt4.QtGui import QApplication
    from PyQt4.QtCore import Qt
    from PyQt4.QtCore import QTimer
    from PyQt4.QtCore import QObject
    from PyQt4.QtCore import QSize
    from PyQt4.QtCore import QEvent
elif PyQtImpl == "PySide":
    from PySide.QtGui import QWidget
    from PySide.QtGui import QSizePolicy
    from PySide.QtGui import QApplication
    from PySide.QtCore import Qt
    from PySide.QtCore import QTimer
    from PySide.QtCore import QObject
    from PySide.QtCore import QSize
    from PySide.QtCore import QEvent
else:
    raise ImportError("Unknown PyQt implementation " + repr(PyQtImpl))
class QVTKRenderWindowInteractor(QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
def __init__(self, parent=None, wflags=Qt.WindowFlags(), **kw):
# the current button
self._ActiveButton = Qt.NoButton
# private attributes
self.__saveX = 0
self.__saveY = 0
self.__saveModifiers = Qt.NoModifier
self.__saveButtons = Qt.NoButton
# do special handling of some keywords:
# stereo, rw
try:
stereo = bool(kw['stereo'])
except KeyError:
stereo = False
try:
rw = kw['rw']
except KeyError:
rw = None
# create qt-level widget
QWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
if rw: # user-supplied render window
self._RenderWindow = rw
else:
self._RenderWindow = vtk.vtkRenderWindow()
WId = self.winId()
# Python2
if type(WId).__name__ == 'PyCObject':
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
WId = pythonapi.PyCObject_AsVoidPtr(WId)
# Python3
elif type(WId).__name__ == 'PyCapsule':
from ctypes import pythonapi, c_void_p, py_object, c_char_p
pythonapi.PyCapsule_GetName.restype = c_char_p
pythonapi.PyCapsule_GetName.argtypes = [py_object]
name = pythonapi.PyCapsule_GetName(WId)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
WId = pythonapi.PyCapsule_GetPointer(WId, name)
self._RenderWindow.SetWindowInfo(str(int(WId)))
if stereo: # stereo mode
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
try:
self._Iren = kw['iren']
except KeyError:
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
# do all the necessary qt setup
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setMouseTracking(True) # get all mouse events
self.setFocusPolicy(Qt.WheelFocus)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self._Timer = QTimer(self)
self._Timer.timeout.connect(self.TimerEvent)
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
#Create a hidden child widget and connect its destroyed signal to its
#parent ``Finalize`` slot. The hidden children will be destroyed before
#its parent thus allowing cleanup of VTK elements.
self._hidden = QWidget(self)
self._hidden.hide()
self._hidden.destroyed.connect(self.Finalize)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ +
" has no attribute named " + attr)
def Finalize(self):
'''
Call internal cleanup method on VTK objects
'''
self._RenderWindow.Finalize()
def CreateTimer(self, obj, evt):
self._Timer.start(10)
def DestroyTimer(self, obj, evt):
self._Timer.stop()
return 1
def TimerEvent(self):
self._Iren.TimerEvent()
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the current
# cursor is not yet set so we defer this by which time the current
# cursor should have been set.
QTimer.singleShot(0, self.ShowCursor)
def HideCursor(self):
"""Hides the cursor."""
self.setCursor(Qt.BlankCursor)
def ShowCursor(self):
"""Shows the cursor."""
vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
self.setCursor(qt_cursor)
def closeEvent(self, evt):
self.Finalize()
def sizeHint(self):
return QSize(400, 400)
def paintEngine(self):
return None
def paintEvent(self, ev):
self._Iren.Render()
def resizeEvent(self, ev):
w = self.width()
h = self.height()
vtk.vtkRenderWindow.SetSize(self._RenderWindow, w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
self.update()
def _GetCtrlShift(self, ev):
ctrl = shift = False
if hasattr(ev, 'modifiers'):
if ev.modifiers() & Qt.ShiftModifier:
shift = True
if ev.modifiers() & Qt.ControlModifier:
ctrl = True
else:
if self.__saveModifiers & Qt.ShiftModifier:
shift = True
if self.__saveModifiers & Qt.ControlModifier:
ctrl = True
return ctrl, shift
def enterEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.EnterEvent()
def leaveEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
repeat = 0
if ev.type() == QEvent.MouseButtonDblClick:
repeat = 1
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), repeat, None)
self._ActiveButton = ev.button()
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonPressEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonPressEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonPressEvent()
def mouseReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonReleaseEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonReleaseEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
self.__saveModifiers = ev.modifiers()
self.__saveButtons = ev.buttons()
self.__saveX = ev.x()
self.__saveY = ev.y()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = str(ev.text())
else:
key = chr(0)
keySym = _qt_key_to_key_sym(ev.key())
if shift and len(keySym) == 1 and keySym.isalpha():
keySym = keySym.upper()
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, keySym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
if ev.delta() >= 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
    def GetRenderWindow(self):
        """Return the vtkRenderWindow associated with this widget."""
        return self._RenderWindow
    def Render(self):
        """Request a Qt repaint of the widget (QWidget.update)."""
        self.update()
def QVTKRenderWidgetConeExample():
    """A simple example that uses the QVTKRenderWindowInteractor class."""
    # Qt requires a QApplication instance before any widget is created.
    app = QApplication(['QVTKRenderWindowInteractor'])

    # Create and start the interactor widget.
    widget = QVTKRenderWindowInteractor()
    widget.Initialize()
    widget.Start()
    # if you dont want the 'q' key to exit comment this.
    widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())

    # Build a minimal cone pipeline and attach it to a renderer.
    renderer = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(renderer)

    source = vtk.vtkConeSource()
    source.SetResolution(8)

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    renderer.AddActor(actor)

    # Show the widget and enter the Qt event loop.
    widget.show()
    app.exec_()
# Mapping from Qt key-code constants to the VTK keysym strings expected by
# vtkRenderWindowInteractor.  Letters map to lowercase; keyPressEvent
# uppercases them when shift is held.
_keysyms = {
    Qt.Key_Backspace: 'BackSpace',
    Qt.Key_Tab: 'Tab',
    Qt.Key_Backtab: 'Tab',
    # Qt.Key_Clear : 'Clear',
    Qt.Key_Return: 'Return',
    Qt.Key_Enter: 'Return',
    Qt.Key_Shift: 'Shift_L',
    Qt.Key_Control: 'Control_L',
    Qt.Key_Alt: 'Alt_L',
    Qt.Key_Pause: 'Pause',
    Qt.Key_CapsLock: 'Caps_Lock',
    Qt.Key_Escape: 'Escape',
    Qt.Key_Space: 'space',
    # Qt.Key_Prior : 'Prior',
    # Qt.Key_Next : 'Next',
    Qt.Key_End: 'End',
    Qt.Key_Home: 'Home',
    Qt.Key_Left: 'Left',
    Qt.Key_Up: 'Up',
    Qt.Key_Right: 'Right',
    Qt.Key_Down: 'Down',
    Qt.Key_SysReq: 'Snapshot',
    Qt.Key_Insert: 'Insert',
    Qt.Key_Delete: 'Delete',
    Qt.Key_Help: 'Help',
    Qt.Key_0: '0',
    Qt.Key_1: '1',
    Qt.Key_2: '2',
    Qt.Key_3: '3',
    Qt.Key_4: '4',
    Qt.Key_5: '5',
    Qt.Key_6: '6',
    Qt.Key_7: '7',
    Qt.Key_8: '8',
    Qt.Key_9: '9',
    Qt.Key_A: 'a',
    Qt.Key_B: 'b',
    Qt.Key_C: 'c',
    Qt.Key_D: 'd',
    Qt.Key_E: 'e',
    Qt.Key_F: 'f',
    Qt.Key_G: 'g',
    Qt.Key_H: 'h',
    Qt.Key_I: 'i',
    Qt.Key_J: 'j',
    Qt.Key_K: 'k',
    Qt.Key_L: 'l',
    Qt.Key_M: 'm',
    Qt.Key_N: 'n',
    Qt.Key_O: 'o',
    Qt.Key_P: 'p',
    Qt.Key_Q: 'q',
    Qt.Key_R: 'r',
    Qt.Key_S: 's',
    Qt.Key_T: 't',
    Qt.Key_U: 'u',
    Qt.Key_V: 'v',
    Qt.Key_W: 'w',
    Qt.Key_X: 'x',
    Qt.Key_Y: 'y',
    Qt.Key_Z: 'z',
    Qt.Key_Asterisk: 'asterisk',
    Qt.Key_Plus: 'plus',
    Qt.Key_Minus: 'minus',
    Qt.Key_Period: 'period',
    Qt.Key_Slash: 'slash',
    Qt.Key_F1: 'F1',
    Qt.Key_F2: 'F2',
    Qt.Key_F3: 'F3',
    Qt.Key_F4: 'F4',
    Qt.Key_F5: 'F5',
    Qt.Key_F6: 'F6',
    Qt.Key_F7: 'F7',
    Qt.Key_F8: 'F8',
    Qt.Key_F9: 'F9',
    Qt.Key_F10: 'F10',
    Qt.Key_F11: 'F11',
    Qt.Key_F12: 'F12',
    Qt.Key_F13: 'F13',
    Qt.Key_F14: 'F14',
    Qt.Key_F15: 'F15',
    Qt.Key_F16: 'F16',
    Qt.Key_F17: 'F17',
    Qt.Key_F18: 'F18',
    Qt.Key_F19: 'F19',
    Qt.Key_F20: 'F20',
    Qt.Key_F21: 'F21',
    Qt.Key_F22: 'F22',
    Qt.Key_F23: 'F23',
    Qt.Key_F24: 'F24',
    Qt.Key_NumLock: 'Num_Lock',
    Qt.Key_ScrollLock: 'Scroll_Lock',
    }
def _qt_key_to_key_sym(key):
    """Map a Qt key code to the corresponding VTK keysym string.

    Mirrors the C++ implementation in GUISupport/Qt/QVTKInteractorAdapter.cxx.
    Returns None for keys without a mapping.
    """
    # dict.get returns None for unmapped keys, matching the original
    # explicit membership test.
    return _keysyms.get(key)
if __name__ == "__main__":
    # Report which Qt binding was selected, then run the cone demo.
    print(PyQtImpl)
    QVTKRenderWidgetConeExample()
| hlzz/dotfiles | graphics/VTK-7.0.0/Wrapping/Python/vtk/qt/QVTKRenderWindowInteractor.py | Python | bsd-3-clause | 19,228 | [
"CRYSTAL",
"VTK"
] | 99bb68443e59dfaf7b9b457697607d8ce712969603911b7981e5c801f59c89a9 |
#!/usr/bin/env python2
# Description and epilog strings shown in the argparse --help output.
desc="""Report sites with differences between bam files.
"""
epilog="""AUTHOR:
l.p.pryszcz+git@gmail.com
Mizerow, 25/02/2014
"""
import argparse, os, sys
import pysam, subprocess
from datetime import datetime
from bam2snps import _remove_indels
def get_allele_frequency(bases, alphabet="ACGT"):
    """Return [(base, frequency), ...] over `alphabet` for a pileup
    bases string, or [] when no bases from the alphabet are present.
    Indel marks are stripped before counting (Python 2 module).
    """
    counts = {base: 0 for base in alphabet}
    for base in _remove_indels(bases).upper():
        if base in counts:
            counts[base] += 1
    total = sum(counts.itervalues())
    if total == 0:
        return []
    return [(b, c * 1.0 / total) for b, c in counts.iteritems()]
def bam2difference(fnames, out, minCov, minFreq, homozygous, positions, verbose):
    """Stream `samtools mpileup` over all BAM files and report positions
    whose filtered allele sets differ between samples.
    """
    #write header
    header = "coordinate\t%s\n" % "\t".join(fnames)
    out.write(header)
    #open subprocess
    args = ['samtools', 'mpileup']
    if positions:
        args += ["-l", positions.name]
    args += fnames
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    for line in proc.stdout:
        lData = line.split('\t')
        contig, pos, ref = lData[:3]
        #process all samples
        data = []
        # mpileup emits three columns (coverage, bases, quals) per BAM,
        # starting after the three shared columns.
        for i in range(3, len(lData), 3):
            cov, bases, quals = lData[i:i+3]
            # `break` (not `continue`) means one failing sample discards
            # the whole site via the length check below.
            if int(cov) < minCov:
                break
            #select alleles base on min freq
            alt_alleles = get_allele_frequency(bases)
            alleles = [base for base, freq in filter(lambda x: x[1]>=minFreq, alt_alleles)]
            #skip if not homozygous
            if homozygous and len(alleles)>1:
                break
            #store
            data.append("/".join(sorted(alleles)))
        #report only if more than one allele all samples passed filtering
        # len(lData)/3 - 1 is the sample count (Python 2 integer division).
        if len(set(data))>1 and len(data)==(len(lData)/3)-1:
            out.write("%s:%s\t%s\n" % (contig, pos, "\t".join(data)))
def main():
    """Parse command-line options and run bam2difference."""
    usage = "%(prog)s [options]"
    parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
    parser.add_argument("-v", "--verbose", default=False, action="store_true")
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument("-i", "--input", nargs="+",
                        help="input BAM files")
    # NOTE: type=file uses the Python 2 built-in `file`; argparse opens the
    # argument directly (this script is Python 2 only, see the shebang).
    parser.add_argument("-o", "--output", default=sys.stdout, type=file,
                        help="output stream [stdout]")
    parser.add_argument("-c", "--cov", default=10, type=int,
                        help="min coverage [%(default)s]")
    parser.add_argument("-f", "--freq", default=0.2, type=float,
                        help="min allele frequency [%(default)s]")
    parser.add_argument("--homozygous", action='store_true', default=False,
                        help="report only homozygous [%(default)s]")
    parser.add_argument("--positions", type=file,
                        help="pre-selected positions [%(default)s]")
    o = parser.parse_args()
    if o.verbose:
        sys.stderr.write( "Options: %s\n" % str(o) )
    bam2difference(o.input, o.output, o.cov, o.freq, o.homozygous, o.positions, o.verbose)
if __name__=='__main__':
    # Time the whole run and report elapsed time on stderr.
    t0=datetime.now()
    try:
        main()
    except KeyboardInterrupt:
        sys.stderr.write("\nCtrl-C pressed! \n")
    dt=datetime.now()-t0
    sys.stderr.write( "#Time elapsed: %s\n" % dt )
| lpryszcz/bin | bam2difference.py | Python | gpl-3.0 | 3,445 | [
"pysam"
] | b3c6ffa4f484c0f1d85b2f0bc80d09f394eb707992f57c2201b446c10301d7a6 |
"""
This module contains fileIO operations and file conversion for the image
processing tool kit in the NSLS-II data analysis software package.
The functions included in this module focus on reading and writing
netCDF files. This is the file format used by Mark Rivers for
x-ray computed microtomography data collected at Argonne National Laboratory,
Sector 13BMD, GSECars.
"""
from __future__ import division, absolute_import, print_function
import os
from netCDF4 import Dataset
def load_netCDF(file_name):
    """
    This function loads the specified netCDF file format data set (e.g.*.volume
    APS-Sector 13 GSECARS extension) file into a numpy array for further
    analysis.

    Required Dependencies
    ---------------------
    netcdf4 : Python/numpy interface to the netCDF ver. 4 library
        Package name: netcdf4-python
        Install from: https://github.com/Unidata/netcdf4-python

    Parameters
    ----------
    file_name : string
        Complete path to the file to be loaded into memory

    Returns
    -------
    md_dict : dict
        Dictionary containing all metadata contained in the netCDF file.
        This metadata contains data collection, and experiment information
        as well as values and variables pertinent to the image data.

    data : ndarray
        ndarray containing the image data contained in the netCDF file.
        The image data is scaled using the scale factor defined in the
        netCDF metadata, if a scale factor was recorded during data
        acquisition or reconstruction. If a scale factor is not present,
        then a default value of 1.0 is used.
    """
    with Dataset(os.path.normpath(file_name), 'r') as src_file:
        vol = src_file.variables['VOLUME']
        # Default to 1.0 when no scale factor attribute was recorded, as the
        # docstring promises; the original unconditional access raised
        # AttributeError on unscaled data sets.
        scale_factor = getattr(vol, 'scale_factor', 1.0)
        # Copy the values out while the dataset is still open: the original
        # returned the netCDF Variable itself, which becomes a dead handle
        # once the `with` block closes the file.
        data = vol[:]
        md_dict = dict(src_file.__dict__)
    # Check for voxel intensity scale factor and apply if value is present
    if scale_factor != 1.0:
        data = data / scale_factor
    # Accounts for specific case where z_pixel_size doesn't get assigned
    # even though dimensions are actually isotropic. This occurs when
    # reconstruction is completed using tomo_recon on data collected at
    # APS-13BMD.
    if (md_dict['x_pixel_size'] == md_dict['y_pixel_size'] and
            md_dict['z_pixel_size'] == 0.0 and data.shape[0] > 1):
        md_dict['voxel_size'] = {'value': md_dict['x_pixel_size'],
                                 'type': float, 'units': ''}
    return md_dict, data
| licode/scikit-xray | skbeam/io/net_cdf_io.py | Python | bsd-3-clause | 3,187 | [
"NetCDF"
] | 71787a1fdf66a43c9f10834624bb4927f4f588df1ac6b38140fe4463e51d8923 |
import numpy
from matplotlib import pyplot as plt
class Model:
    """Base class for fit-able models.

    Stores the training data and provides `fit`, which simply evaluates
    the (callable) model on that stored data.
    """

    def __init__(self, train):
        self._train = train

    def fit(self, beta, grad=True):
        """Evaluate the model on the stored training data."""
        return self.__call__(self._train, beta, grad=grad)
class MultiModel(Model):
    """A model composed of sub-models ("terms").

    Concatenates the terms' initial parameters and bounds into flat lists,
    and records for each term the slice of the full parameter vector that
    belongs to it.
    """

    def __init__(self, train, terms):
        """
        Arguments:
          train {*} - The training data for the model.
          terms {list} - A list of models.
        """
        Model.__init__(self, train)
        self.terms = terms
        self._beta0 = []
        self._bounds = []
        self.slices = []
        offset = 0
        for term in self.terms:
            # Call beta0() once per term (the original called it twice).
            b0 = term.beta0()
            n = len(b0)
            self._beta0 += b0
            self._bounds += term.bounds()
            self.slices.append(slice(offset, offset + n))
            offset += n

    def bounds(self):
        """Return the concatenated parameter bounds of all terms."""
        return self._bounds

    def beta0(self):
        """Return the concatenated initial parameter values of all terms."""
        return self._beta0

    def show(self, beta):
        """Print each term's summary for its own slice of `beta`."""
        for term, slc in zip(self.terms, self.slices):
            term.show(beta[slc])
class ProductModel(MultiModel):
    """Model whose value is the product of its terms' values."""

    def __init__(self, data, terms=None):
        """
        Arguments:
          data {*} - The training data for the model.
          terms {list} - A list of Model instances (default: no terms).
        """
        # `terms=None` avoids the shared-mutable-default pitfall of the
        # original `terms=[]` signature.
        MultiModel.__init__(self, data, [] if terms is None else terms)

    def __call__(self, x, beta, grad=False):
        """Evaluate the product of all terms at `x`.

        Returns the product, or (product, gradient) when grad is True; the
        gradient columns follow the parameter slices of the terms.
        """
        N = len(x)
        prod = lambda x, y: x * y
        values = [term(x, beta[slc], grad=grad) for (term, slc) in zip(self.terms, self.slices)]
        if grad == False:
            return reduce(prod, values)
        fs, grads = zip(*values)
        # Product rule: the gradient of term i is multiplied by the product
        # of every other term's values.
        grad = numpy.zeros((N, len(beta)))
        ones = numpy.ones(N)
        for i, (term, slc) in enumerate(zip(self.terms, self.slices)):
            coeffs = reduce(prod, fs[:i], ones) * reduce(prod, fs[i + 1:], ones)
            grad[:, slc] = coeffs.reshape(N, 1) * grads[i]
        return reduce(prod, fs), grad
class SumModel(MultiModel):
    """Model whose value is the sum of its terms' values."""

    def __init__(self, data, terms=None):
        """
        Arguments:
          data {*} - The training data for the model.
          terms {list} - A list of Model instances (default: no terms).
        """
        # `terms=None` avoids the shared-mutable-default pitfall of the
        # original `terms=[]` signature.
        MultiModel.__init__(self, data, [] if terms is None else terms)

    def __call__(self, x, beta, grad=False):
        """Evaluate the sum of all terms at `x`.

        Returns the sum, or (sum, gradient) when grad is True; gradients of
        a sum are simply the term gradients stacked column-wise.
        """
        summ = lambda x, y: x + y
        values = [term(x, beta[slc], grad=grad) for (term, slc) in zip(self.terms, self.slices)]
        if grad == False:
            return reduce(summ, values)
        fs, grads = zip(*values)
        return reduce(summ, fs), numpy.hstack(grads)
class AsymmGaussian(Model):
    """Asymmetric Gaussian bump: value 1.0 at the center, decaying with
    independent left/right widths.

    Parameter layout depends on the lo/hi flags:
    [center, left_width, right_width], with the width of a disabled side
    omitted (that side stays flat at 1.0).
    """
    def __init__(self, train, lo=True, hi=True, name='Asymm Gaussian'):
        # lo/hi toggle whether the left/right side of the bump decays.
        Model.__init__(self, train)
        self.lo = lo
        self.hi = hi
        self.name = name
    def _unpack_beta(self, beta):
        # Expand a packed beta into the full [center, dt_lo, dt_hi] triple,
        # substituting None for the width of a disabled side.
        if self.lo == False:
            return [beta[0], None, beta[1]]
        elif self.hi == False:
            return beta + [None]
        else:
            return beta
    def bounds(self):
        # Center is unbounded; widths must be non-negative.  Only one width
        # parameter exists when either side is disabled.
        b = [(None, None)]
        widths = 2
        if self.lo == False or self.hi == False:
            widths = 1
        return b + widths * [(0.0, None)]
    def show(self, beta):
        # Python 2 print statements (this module targets Python 2).
        print '%s' % self.name
        print '  center: %.1f' % beta[0]
        if self.lo:
            print '  left width: %.1f' % beta[1]
        if self.hi:
            if self.lo:
                width = beta[2]
            else:
                width = beta[1]
            print '  right width: %.1f' % width
        print '  params:', beta
        #plt.scatter(self._train, self(self._train, beta, grad=False))
        #plt.show()
        #plt.clf()
    def __call__(self, t, beta, grad=True):
        # Start flat at 1.0; disabled sides keep that value.
        f = numpy.ones(len(t))
        mid, dt_lo, dt_hi = self._unpack_beta(beta)
        numer = -0.5 * (t - mid)**2
        hi = t >= mid
        lo = t < mid
        if (self.lo):
            f[lo] = numpy.exp(numer[lo] / dt_lo**2)
        if (self.hi):
            f[hi] = numpy.exp(numer[hi] / dt_hi**2)
        if grad == False:
            return f
        # Analytic gradient; columns follow the packed beta layout
        # (column 0 = center, remaining columns = widths).
        grad = numpy.zeros((len(t), len(beta)))
        diff = (t - mid)
        diff2 = diff**2
        if self.lo:
            # d f / d mid
            grad[lo, 0] = diff[lo] * f[lo] / dt_lo**2
            # d f / d dt_lo
            grad[lo, 1] = diff2[lo] * f[lo] / dt_lo**3
        if self.hi:
            # The right-width column index depends on whether a left width
            # precedes it in beta.
            if self.lo:
                index = 2
            else:
                index = 1
            # d f / d mid
            grad[hi, 0] = diff[hi] * f[hi] / dt_hi**2
            # d f / d dt_hi
            grad[hi, index] = diff2[hi] * f[hi] / dt_hi**3
        return f, grad
| bauerca/BikeShareDemand_Carl | models.py | Python | gpl-2.0 | 4,087 | [
"Gaussian"
] | 27a0d8ebe205679fc187b960454bfd6b9a60cd78540295939f650c264884d436 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- "Jeroen Hoekx (@jhoekx)"
- "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the logical volume exists. If C(present) the C(size) option
is required.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
- Shrink or remove operations of volumes requires this switch. Ensures that
that filesystems get never corrupted/destroyed by mistake.
required: false
opts:
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
snapshot:
version_added: "2.1"
description:
- The name of the snapshot volume
required: false
pvs:
version_added: "2.2"
description:
- Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb
required: false
notes:
- Filesystems on top of the volume are not resized.
'''
# Usage examples rendered by ansible-doc.  The "Resize ... % of VG" example
# previously read "vg-firefly" (a typo for the vg=firefly parameter).
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512
# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol: vg=firefly lv=test size=512 pvs=/dev/sda,/dev/sdb
# Create cache pool logical volume
- lvol: vg=firefly lv=lvcache size=512m opts='--type cache-pool'
# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g
# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE
# Create a logical volume with special options
- lvol: vg=firefly lv=test size=512g opts="-r 16"
# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024
# Extend the logical volume to consume all remaining space in the volume group
- lvol: vg=firefly lv=test size=+100%FREE
# Extend the logical volume to take all remaining space of the PVs
- lvol: vg=firefly lv=test size=100%PVS
# Resize the logical volume to % of VG
- lvol: vg=firefly lv=test size=80%VG force=yes
# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes
# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes
# Create a snapshot volume of the test logical volume.
- lvol: vg=firefly lv=test snapshot=snap1 size=100m
'''
import re
# Matches the leading integer of lvs/vgs size fields (e.g. "512.00m" -> 512).
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
    """Pack a dotted version (major, minor, patch) into one comparable int."""
    return int(major) * 1000000 + int(minor) * 1000 + int(patch)
def parse_lvs(data):
    """Parse `lvs --separator ';'` output into [{'name':..., 'size':...}]."""
    volumes = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        volumes.append({
            # Internal volumes are reported as "[name]"; strip the brackets.
            'name': fields[0].replace('[', '').replace(']', ''),
            # Keep only the integer part of the size (units/decimals dropped).
            'size': int(decimal_point.match(fields[1]).group(1)),
        })
    return volumes
def parse_vgs(data):
    """Parse `vgs --separator ';'` output into dicts with name, total size,
    free size and extent size (sizes truncated to ints)."""
    groups = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        groups.append({
            'name': fields[0],
            'size': int(decimal_point.match(fields[1]).group(1)),
            'free': int(decimal_point.match(fields[2]).group(1)),
            'ext_size': int(decimal_point.match(fields[3]).group(1)),
        })
    return groups
def get_lvm_version(module):
    """Return the installed LVM version packed via mkversion(), or None when
    `lvm version` fails or its output cannot be parsed."""
    ver_cmd = module.get_bin_path("lvm", required=True)
    rc, out, err = module.run_command("%s version" % (ver_cmd))
    if rc != 0:
        return None
    # Raw string: the original non-raw pattern relied on Python passing the
    # unrecognized escapes "\s"/"\d" through verbatim, which raises a
    # SyntaxWarning/DeprecationWarning on modern Python.
    m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
    if not m:
        return None
    return mkversion(m.group(1), m.group(2), m.group(3))
def main():
    """Ansible entry point: create, resize or remove an LVM logical volume
    (or snapshot) according to the module parameters."""
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
            snapshot=dict(type='str', default=None),
            pvs=dict(type='str')
        ),
        supports_check_mode=True,
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found == None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    # Default to lvcreate's -L (absolute size) in megabytes; a '%' in the
    # size switches to -l (extents) below.
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # lv tools expect space-separated physical volumes on the command line.
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if not '%' in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                float(size)
                if not size[0].isdigit(): raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        # A missing VG is a no-op for state=absent but an error otherwise.
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    # When a snapshot is requested, the existence check targets the
    # snapshot's name rather than the origin volume's.
    if snapshot is None:
        check_lv = lv
    else:
        check_lv = snapshot

    # for/else: this_lv stays None when no matching volume was found.
    for test_lv in lvs:
        if test_lv['name'] == check_lv:
            this_lv = test_lv
            break
    else:
        this_lv = None

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")
        else:
            module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])

    msg = ''
    if this_lv is None:
        if state == 'present':
            ### create LV
            if module.check_mode:
                changed = True
            else:
                lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
                if snapshot is not None:
                    cmd = "%s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
                else:
                    cmd = "%s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
                rc, _, err = module.run_command(cmd)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            ### remove LV
            if module.check_mode:
                module.exit_json(changed=True)
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif size_opt == 'l':
            ### Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else: # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            # A leading '+' means "grow by", i.e. relative to the current size.
            if '+' in size:
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested -  this_lv['size']), unit, size_free, unit))
            elif this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if module.check_mode:
                    changed = True
                else:
                    cmd = "%s -%s %s%s %s/%s %s" % (tool, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                    rc, out, err = module.run_command(cmd)
                    # "Reached maximum COW size" means a snapshot volume is full.
                    if "Reached maximum COW size" in out:
                        module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                    elif rc == 0:
                        changed = True
                        msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                    elif "matches existing size" in err:
                        module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                    else:
                        module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            ### resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if module.check_mode:
                    changed = True
                else:
                    cmd = "%s -%s %s%s %s/%s %s" % (tool, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                    rc, out, err = module.run_command(cmd)
                    if "Reached maximum COW size" in out:
                        module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                    elif rc == 0:
                        changed = True
                    elif "matches existing size" in err:
                        module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                    else:
                        module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
# import module snippets
# (Ansible expands this wildcard into the module-utils boilerplate that
# provides AnsibleModule when the module payload is built.)
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| milad-soufastai/ansible-modules-extras | system/lvol.py | Python | gpl-3.0 | 14,624 | [
"Firefly"
] | 62cf2714a9f00a2c9a8e5093db98f999b1003790d528cfd1a856012c60f8e0be |
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    # Identity: marks the string for translation without translating it
    # at settings-import time.
    return s
####################
# CORE             #
####################

# Enable debug pages and verbose error reporting; never enable on a live site.
DEBUG = False

# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False

# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []

# List of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = []

# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []

# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'

# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# Languages we provide translations for, out of the box.
# Each entry is (language code, display name marked for later translation).
LANGUAGES = [
    ('af', gettext_noop('Afrikaans')),
    ('ar', gettext_noop('Arabic')),
    ('ar-dz', gettext_noop('Algerian Arabic')),
    ('ast', gettext_noop('Asturian')),
    ('az', gettext_noop('Azerbaijani')),
    ('bg', gettext_noop('Bulgarian')),
    ('be', gettext_noop('Belarusian')),
    ('bn', gettext_noop('Bengali')),
    ('br', gettext_noop('Breton')),
    ('bs', gettext_noop('Bosnian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('dsb', gettext_noop('Lower Sorbian')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('en-au', gettext_noop('Australian English')),
    ('en-gb', gettext_noop('British English')),
    ('eo', gettext_noop('Esperanto')),
    ('es', gettext_noop('Spanish')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('es-co', gettext_noop('Colombian Spanish')),
    ('es-mx', gettext_noop('Mexican Spanish')),
    ('es-ni', gettext_noop('Nicaraguan Spanish')),
    ('es-ve', gettext_noop('Venezuelan Spanish')),
    ('et', gettext_noop('Estonian')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('fy', gettext_noop('Frisian')),
    ('ga', gettext_noop('Irish')),
    ('gd', gettext_noop('Scottish Gaelic')),
    ('gl', gettext_noop('Galician')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('hsb', gettext_noop('Upper Sorbian')),
    ('hu', gettext_noop('Hungarian')),
    ('hy', gettext_noop('Armenian')),
    ('ia', gettext_noop('Interlingua')),
    ('id', gettext_noop('Indonesian')),
    ('io', gettext_noop('Ido')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('kab', gettext_noop('Kabyle')),
    ('kk', gettext_noop('Kazakh')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('ko', gettext_noop('Korean')),
    ('lb', gettext_noop('Luxembourgish')),
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('mr', gettext_noop('Marathi')),
    ('my', gettext_noop('Burmese')),
    ('nb', gettext_noop('Norwegian Bokmål')),
    ('ne', gettext_noop('Nepali')),
    ('nl', gettext_noop('Dutch')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('os', gettext_noop('Ossetic')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('sw', gettext_noop('Swahili')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('tt', gettext_noop('Tatar')),
    ('udm', gettext_noop('Udmurt')),
    ('uk', gettext_noop('Ukrainian')),
    ('ur', gettext_noop('Urdu')),
    ('uz', gettext_noop('Uzbek')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-hans', gettext_noop('Simplified Chinese')),
    ('zh-hant', gettext_noop('Traditional Chinese')),
]

# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Additional filesystem directories searched for translation catalogs.
LOCALE_PATHS = []

# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None

# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False

# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS

# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'

# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'

# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}

# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []

# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# Host for sending email.
EMAIL_HOST = 'localhost'

# Port for sending email.
EMAIL_PORT = 25

# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False

# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None

# List of strings representing installed apps.
INSTALLED_APPS = []

# Template engine configurations (backend, directories, options).
TEMPLATES = []

# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'

# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'

# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '

# Whether to append trailing slashes to URLs.
APPEND_SLASH = True

# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False

# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None

# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = [
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search'),
#     ]
DISALLOWED_USER_AGENTS = []

# Mapping used to override models' get_absolute_url() results per
# "app_label.model_name" (see the Django settings reference).
ABSOLUTE_URL_OVERRIDES = {}

# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
#    import re
#    IGNORABLE_404_URLS = [
#        re.compile(r'^/apple-touch-icon.*\.png$'),
#        re.compile(r'^/favicon.ico$'),
#        re.compile(r'^/robots.txt$'),
#        re.compile(r'^/phpmyadmin/'),
#        re.compile(r'\.(cgi|php|pl)$'),
#    ]
IGNORABLE_404_URLS = []

# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''

# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None

# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None

# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440  # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440  # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y',  # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y',  # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y',  # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y',  # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
    '%H:%M:%S',  # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',  # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
    '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',  # '10/25/06 14:30'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'DENY'
# Whether to honor the X-Forwarded-Host / X-Forwarded-Port headers set by a
# trusted proxy in front of Django.
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# Settings used by the cache middleware (key prefix, page TTL, backend alias).
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The number of seconds a password reset link is valid for
# (default: 3 days).
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52  # 52 weeks (~1 year)
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter'
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| kaedroho/django | django/conf/global_settings.py | Python | bsd-3-clause | 22,212 | [
"VisIt"
] | cf88a291528d17b86a5645ddecdc25096ef9d50721f46c3de00df526cc9bdc7f |
#!@PYTHON_EXECUTABLE@
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import json
import argparse
from argparse import RawTextHelpFormatter
# Build the command-line interface. RawTextHelpFormatter preserves the
# newlines embedded in the multi-line help strings below.
parser = argparse.ArgumentParser(description="Psi4: Open-Source Quantum Chemistry", formatter_class=RawTextHelpFormatter)
parser.add_argument("-i", "--input", default="input.dat",
                    help="Input file name. Default: input.dat.")
parser.add_argument("-o", "--output", help="""\
Redirect output elsewhere.
Default: when input filename is 'input.dat', 'output.dat'.
Otherwise, output filename defaults to input filename with
any '.in' or 'dat' extension replaced by '.out'""")
parser.add_argument("-a", "--append", action='store_true',
                    help="Appends results to output file. Default: Truncate first")
parser.add_argument("-V", "--version", action='store_true',
                    help="Prints version information.")
parser.add_argument("-n", "--nthread", default=1,
                    help="Number of threads to use. Psi4 disregards OMP_NUM_THREADS/MKL_NUM_THREADS.")
parser.add_argument("-s", "--scratch",
                    help="Scratch directory to use. Overrides PSI_SCRATCH.")
parser.add_argument("-m", "--messy", action='store_true',
                    help="Leaves temporary files after the run is completed.")
# parser.add_argument("-d", "--debug", action='store_true', help="Flush the outfile at every print statement.")
# parser.add_argument("-r", "--restart", action='store_true', help="Number to be used instead of process id.")
parser.add_argument("-p", "--prefix",
                    help="Prefix name for psi files. Default psi")
parser.add_argument("--psiapi-path", action='store_true',
                    help="""Generates a bash command to source correct Python """
                         """interpreter and path for ``python -c "import psi4"``""")
parser.add_argument("-v", "--verbose", action='store_true', help="Prints Psithon to Python translation.")
parser.add_argument("--inplace", action='store_true',
                    help="Runs Psi4 from the source directory. !Warning! expert option.")
parser.add_argument("-l", "--psidatadir",
                    help="Specifies where to look for the Psi4 data directory. Overrides PSIDATADIR. !Warning! expert option.")
parser.add_argument("-k", "--skip-preprocessor", action='store_true',
                    help="Skips input preprocessing. !Warning! expert option.")
parser.add_argument("--json", action='store_true',
                    help="Runs a JSON input file. !Warning! experimental option.")
parser.add_argument("-t", "--test", action='store_true',
                    help="Runs smoke tests.")
# For plugins
parser.add_argument("--plugin-name", help="""\
Creates a new directory with files for writing a new plugin.
You can specify an additional argument that specifies a
template to use, for example
>>> psi4 --plugin-name mygreatcode --plugin-template mointegrals""")
parser.add_argument('--plugin-template', default='basic',
                    choices=['aointegrals', 'basic', 'dfmp2', 'mointegrals', 'scf', 'sointegrals', 'wavefunction'],
                    help='Selects new plugin template to use.')
parser.add_argument('--plugin-compile', action='store_true', help="""\
Generates a CMake command for building a plugin against this Psi4 installation.
>>> cd <plugin_directory>
>>> `psi4 --plugin-compile`
>>> make
>>> psi4""")
# print("Environment Variables\n");
# print(" PSI_SCRATCH Directory where scratch files are written.")
# print(" Default: $TMPDIR (or /tmp/ when not set)")
# print(" This should be a local, not network, disk")
# parser.print_help()
# Unrecognized positional arguments are collected into `unknown` and treated
# as input/output filenames further below.
args, unknown = parser.parse_known_args()
args = args.__dict__  # Namespace object seems silly
# Figure out pythonpath
# NOTE: the @...@ placeholders are substituted by CMake at install time.
cmake_install_prefix = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '..')
lib_dir = os.path.sep.join([cmake_install_prefix, "@CMAKE_INSTALL_LIBDIR@", "@PYMOD_INSTALL_LIBDIR@"])
if args["inplace"]:
    # Running from the source tree: a compiled core.so must be symlinked next
    # to this script, and PSIDATADIR must point at the in-tree data directory.
    if "CMAKE_INSTALL_LIBDIR" not in lib_dir:
        raise ImportError("Cannot run inplace from a installed directory.")
    core_location = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "core.so"
    if not os.path.isfile(core_location):
        raise ImportError("A compiled Psi4 core.so needs to be symlinked to the %s folder" % os.path.dirname(__file__))
    lib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if ("PSIDATADIR" not in os.environ.keys()) and (not args["psidatadir"]):
        data_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "share", "psi4"])
        os.environ["PSIDATADIR"] = data_dir
elif "CMAKE_INSTALL_LIBDIR" in lib_dir:
    # Placeholder survived substitution: this copy was never installed.
    raise ImportError("Psi4 was not installed correctly!")
# Replace input/output if unknown kwargs
if len(unknown) > 0:
    args["input"] = unknown[0]
if len(unknown) > 1:
    args["output"] = unknown[1]
if len(unknown) > 2:
    raise KeyError("Too many unknown arguments: %s" % str(unknown))
# Figure out output arg: when -o/--output was not given, derive the output
# filename from the input filename.
if args["output"] is None:
    if args["input"] == "input.dat":
        args["output"] = "output.dat"
    elif args["input"].endswith(".in"):
        # Strip only the trailing extension. The previous str.replace(".in",
        # ".out") also rewrote '.in' substrings occurring earlier in the name
        # (e.g. 'run.inputs.in'), producing a mangled output filename.
        args["output"] = args["input"][:-len(".in")] + ".out"
    elif args["input"].endswith(".dat"):
        args["output"] = args["input"][:-len(".dat")] + ".out"
    else:
        # Unrecognized extension: just append '.dat'.
        args["output"] = args["input"] + ".dat"
# Plugin compile line: print a ready-to-run cmake invocation for building a
# plugin against this installation, then quit.
if args['plugin_compile']:
    share_cmake_dir = os.path.sep.join([cmake_install_prefix, 'share', 'cmake', 'psi4'])
    print("""cmake -C {}/psi4PluginCache.cmake -DCMAKE_PREFIX_PATH={} .""".format(share_cmake_dir, cmake_install_prefix))
    sys.exit()
# Print shell exports that make `python -c "import psi4"` work, then quit.
if args['psiapi_path']:
    pyexe_dir = os.path.dirname("@PYTHON_EXECUTABLE@")
    print("""export PATH={}:$PATH\nexport PYTHONPATH={}:$PYTHONPATH""".format(pyexe_dir, lib_dir))
    sys.exit()
# Transmit any argument psidatadir through environ
if args["psidatadir"] is not None:
    data_dir = os.path.abspath(os.path.expanduser(args["psidatadir"]))
    os.environ["PSIDATADIR"] = data_dir
### Actually import psi4 and apply setup ###
# Import installed psi4 (lib_dir was computed above).
sys.path.insert(1, lib_dir)
import psi4
if args["version"]:
    print(psi4.__version__)
    sys.exit()
if args['plugin_name']:
    # This call does not return.
    psi4.plugin.create_plugin(args['plugin_name'], args['plugin_template'])
if args["test"]:
    psi4.test()
    sys.exit()
if not os.path.isfile(args["input"]):
    raise KeyError("The file %s does not exist." % args["input"])
args["input"] = os.path.normpath(args["input"])
# Setup outfile
if args["append"] is None:
    args["append"] = False
if args["output"] != "stdout":
    psi4.core.set_output_file(args["output"], args["append"])
# Set a few options
if args["prefix"] is not None:
    psi4.core.set_psi_file_prefix(args["prefix"])
psi4.core.set_num_threads(int(args["nthread"]), quiet=True)
psi4.core.set_memory_bytes(524288000, True)  # 500 MiB default memory
psi4.extras._input_dir_ = os.path.dirname(os.path.abspath(args["input"]))
psi4.print_header()
# Prepare scratch for inputparser
if args["scratch"] is not None:
    if not os.path.isdir(args["scratch"]):
        raise Exception("Passed in scratch is not a directory (%s)." % args["scratch"])
    psi4.core.set_environment("PSI_SCRATCH", args["scratch"])
# If this is a json call, compute and stop: results are written back into the
# same JSON file, and the (unused) text outfile is removed.
if args["json"]:
    with open(args["input"], 'r') as f:
        json_data = json.load(f)
    psi4.extras._success_flag_ = True
    psi4.extras.exit_printing()
    psi4.json_wrapper.run_json(json_data)
    with open(args["input"], 'w') as f:
        json.dump(json_data, f)
    if args["output"] != "stdout":
        os.unlink(args["output"])
    sys.exit()
# Read input
with open(args["input"]) as f:
    content = f.read()
# Preprocess: translate Psithon input into plain Python.
if not args["skip_preprocessor"]:
    # PSI_SCRATCH must be set before this call!
    content = psi4.process_input(content)
# Handle Verbose
if args["verbose"]:
    psi4.core.print_out('\nParsed Psithon:')
    psi4.core.print_out(content)
    psi4.core.print_out('-' * 75)
# Handle Messy: keep scratch files by removing the registered cleanup hook.
if args["messy"]:
    import atexit
    if sys.version_info >= (3, 0):
        atexit.unregister(psi4.core.clean)
    else:
        # Python 2's atexit has no unregister(); remove the handler manually.
        for handler in atexit._exithandlers:
            if handler[0] == psi4.core.clean:
                atexit._exithandlers.remove(handler)
# Register exit printing, failure GOTO coffee ELSE beer
import atexit
atexit.register(psi4.extras.exit_printing)
# Run the program!
try:
    # The preprocessed input is plain Python; execute it in this process.
    exec(content)
    psi4.extras._success_flag_ = True
# Capture _any_ python error message
except Exception as exception:
    import traceback
    exc_type, exc_value, exc_traceback = sys.exc_info()
    tb_str = "Traceback (most recent call last):\n"
    tb_str += ''.join(traceback.format_tb(exc_traceback))
    tb_str += '\n'
    tb_str += type(exception).__name__
    tb_str += ': '
    tb_str += str(exception)
    psi4.core.print_out("\n")
    psi4.core.print_out(tb_str)
    psi4.core.print_out("\n")
    if psi4.core.get_output_file() != "stdout":
        # Mirror the traceback to the terminal when output went to a file.
        print(tb_str)
    sys.exit(1)
"Psi4"
] | 693033d3a479b51988b8a9403e2b14afaf98be806c434b87f9796ecbbf04b8af |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py2to3, PY3

if PY3:
    # Python 3 has no `unicode` builtin; alias it to `str` so the
    # `__unicode__` implementation below works on both major versions.
    unicode = str
@py2to3
class ItemList(object):
    """A type-checked, list-like container of model items.

    Only instances of `item_class` are accepted. Attributes given in
    `common_attrs` are set on every item as it is added. Items are stored
    internally in a tuple, so iteration always sees a stable snapshot.
    """
    __slots__ = ['_item_class', '_common_attrs', '_items']

    def __init__(self, item_class, common_attrs=None, items=None):
        self._item_class = item_class
        self._common_attrs = common_attrs
        self._items = ()
        if items:
            self.extend(items)

    def create(self, *args, **kwargs):
        """Construct a new `item_class` instance and append it."""
        return self.append(self._item_class(*args, **kwargs))

    def append(self, item):
        """Validate, add and return `item`."""
        self._check_type_and_set_attrs(item)
        self._items += (item,)
        return item

    def _check_type_and_set_attrs(self, *items):
        # Reject wrong types and propagate the shared attributes.
        common_attrs = self._common_attrs or {}
        for item in items:
            if not isinstance(item, self._item_class):
                raise TypeError("Only %s objects accepted, got %s."
                                % (self._item_class.__name__,
                                   item.__class__.__name__))
            for attr in common_attrs:
                setattr(item, attr, common_attrs[attr])

    def extend(self, items):
        """Validate and add all `items`.

        BUGFIX: `items` is materialized once up front. Previously a one-shot
        iterator (e.g. a generator) was exhausted by the validation pass, so
        `tuple(items)` afterwards was empty and the validated items were
        silently dropped.
        """
        items = tuple(items)
        self._check_type_and_set_attrs(*items)
        self._items += items

    def index(self, item, *start_and_end):
        """Return the index of `item`; raises ValueError if not present."""
        return self._items.index(item, *start_and_end)

    def clear(self):
        """Remove all items."""
        self._items = ()

    def visit(self, visitor):
        """Forward `visitor` to every contained item (visitor pattern)."""
        for item in self:
            item.visit(visitor)

    def __iter__(self):
        return iter(self._items)

    def __getitem__(self, index):
        if not isinstance(index, slice):
            return self._items[index]
        # Slicing returns a new ItemList with the same type/attr constraints.
        items = self.__class__(self._item_class)
        items._common_attrs = self._common_attrs
        items.extend(self._items[index])
        return items

    def __setitem__(self, index, item):
        # `item` is a single item, or an iterable when `index` is a slice.
        if isinstance(index, slice):
            self._check_type_and_set_attrs(*item)
        else:
            self._check_type_and_set_attrs(item)
        items = list(self._items)
        items[index] = item
        self._items = tuple(items)

    def __len__(self):
        return len(self._items)

    def __unicode__(self):
        return u'[%s]' % ', '.join(unicode(item) for item in self)
"VisIt"
] | fb7dfbf0348217c2db21d5161af88d532ccdc408eb0a91626b553e5597c3514c |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.datacatalog_v1beta1.services.data_catalog import (
DataCatalogAsyncClient,
)
from google.cloud.datacatalog_v1beta1.services.data_catalog import DataCatalogClient
from google.cloud.datacatalog_v1beta1.services.data_catalog import pagers
from google.cloud.datacatalog_v1beta1.services.data_catalog import transports
from google.cloud.datacatalog_v1beta1.types import common
from google.cloud.datacatalog_v1beta1.types import datacatalog
from google.cloud.datacatalog_v1beta1.types import gcs_fileset_spec
from google.cloud.datacatalog_v1beta1.types import schema
from google.cloud.datacatalog_v1beta1.types import search
from google.cloud.datacatalog_v1beta1.types import table_spec
from google.cloud.datacatalog_v1beta1.types import tags
from google.cloud.datacatalog_v1beta1.types import timestamps
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client certificate source used by the tests below."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
def modify_default_endpoint(client):
    """Return a modified default endpoint for mTLS endpoint testing.

    If the client's DEFAULT_ENDPOINT is a localhost one, substitute a
    distinct sentinel so mTLS-endpoint derivation can be observed;
    otherwise return the default endpoint unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    mTLS variants, is idempotent on already-mTLS hosts, and leaves
    non-Google endpoints (and None) untouched."""
    convert = DataCatalogClient._get_default_mtls_endpoint
    assert convert(None) is None
    assert convert("example.googleapis.com") == "example.mtls.googleapis.com"
    assert convert("example.mtls.googleapis.com") == "example.mtls.googleapis.com"
    assert (
        convert("example.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    assert (
        convert("example.mtls.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    assert convert("api.example.com") == "api.example.com"
@pytest.mark.parametrize("client_class", [DataCatalogClient, DataCatalogAsyncClient,])
def test_data_catalog_client_from_service_account_info(client_class):
    """from_service_account_info() builds a client (sync and async) whose
    transport uses the credentials returned by the patched factory and
    targets the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "datacatalog.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.DataCatalogGrpcTransport, "grpc"),
        (transports.DataCatalogGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_data_catalog_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports forward always_use_jwt_access to the credentials:
    True calls with_always_use_jwt_access(True); False leaves the
    credentials untouched."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [DataCatalogClient, DataCatalogAsyncClient,])
def test_data_catalog_client_from_service_account_file(client_class):
    """from_service_account_file() and its from_service_account_json alias
    both build clients backed by the factory-produced credentials and the
    default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "datacatalog.googleapis.com:443"
def test_data_catalog_client_get_transport_class():
    """The default transport is a known class and "grpc" maps to the gRPC transport."""
    default_transport = DataCatalogClient.get_transport_class()
    assert default_transport in [transports.DataCatalogGrpcTransport]

    grpc_transport = DataCatalogClient.get_transport_class("grpc")
    assert grpc_transport == transports.DataCatalogGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc"),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
    DataCatalogAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DataCatalogAsyncClient),
)
def test_data_catalog_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise how ClientOptions and mTLS env vars select the transport/endpoint."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(DataCatalogClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(DataCatalogClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", "true"),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", "false"),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
    DataCatalogAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DataCatalogAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_data_catalog_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Verify the "auto" mTLS endpoint switch under each cert-source scenario."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance created in the
                    # previous with-block; only DEFAULT_ENDPOINT/DEFAULT_MTLS_ENDPOINT
                    # class attributes are read, so this works but is fragile.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [DataCatalogClient, DataCatalogAsyncClient])
@mock.patch.object(
    DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
    DataCatalogAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DataCatalogAsyncClient),
)
def test_data_catalog_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source honors both mTLS environment variables."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc"),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_data_catalog_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes from ClientOptions are forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", grpc_helpers),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_data_catalog_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """credentials_file from ClientOptions is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_data_catalog_client_client_options_from_dict():
    """Client options may be supplied as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.datacatalog_v1beta1.services.data_catalog.transports.DataCatalogGrpcTransport.__init__"
    ) as transport_init:
        transport_init.return_value = None
        DataCatalogClient(client_options={"api_endpoint": "squid.clam.whelk"})
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", grpc_helpers),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_data_catalog_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from file are used when creating the gRPC channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The file credentials (not ADC) must reach create_channel.
        create_channel.assert_called_with(
            "datacatalog.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="datacatalog.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [datacatalog.SearchCatalogRequest, dict,])
def test_search_catalog(request_type, transport: str = "grpc"):
    """search_catalog issues one RPC and wraps the response in a pager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.SearchCatalogResponse(
            next_page_token="next_page_token_value",
        )
        response = client.search_catalog(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.SearchCatalogRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchCatalogPager)
    assert response.next_page_token == "next_page_token_value"
def test_search_catalog_empty_call():
    """search_catalog() with no arguments still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    stub_call = mock.patch.object(type(client.transport.search_catalog), "__call__")
    with stub_call as call:
        client.search_catalog()
        call.assert_called()
        _, call_args, _ = call.mock_calls[0]
        assert call_args[0] == datacatalog.SearchCatalogRequest()
@pytest.mark.asyncio
async def test_search_catalog_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.SearchCatalogRequest
):
    """Async search_catalog issues one RPC and returns an async pager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.SearchCatalogResponse(next_page_token="next_page_token_value",)
        )
        response = await client.search_catalog(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.SearchCatalogRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchCatalogAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_catalog_async_from_dict():
    """Re-run the async search_catalog test with a plain dict request."""
    await test_search_catalog_async(request_type=dict)
def test_search_catalog_flattened():
    """Flattened args (scope, query) are packed into the request object."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.SearchCatalogResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.search_catalog(
            scope=datacatalog.SearchCatalogRequest.Scope(
                include_org_ids=["include_org_ids_value"]
            ),
            query="query_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].scope
        mock_val = datacatalog.SearchCatalogRequest.Scope(
            include_org_ids=["include_org_ids_value"]
        )
        assert arg == mock_val
        arg = args[0].query
        mock_val = "query_value"
        assert arg == mock_val
def test_search_catalog_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        # A request object together with flattened kwargs is rejected.
        client.search_catalog(
            datacatalog.SearchCatalogRequest(),
            scope=datacatalog.SearchCatalogRequest.Scope(
                include_org_ids=["include_org_ids_value"]
            ),
            query="query_value",
        )
@pytest.mark.asyncio
async def test_search_catalog_flattened_async():
    """Async flattened args (scope, query) are packed into the request object."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The original also assigned a plain SearchCatalogResponse first,
        # which was immediately overwritten — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.SearchCatalogResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_catalog(
            scope=datacatalog.SearchCatalogRequest.Scope(
                include_org_ids=["include_org_ids_value"]
            ),
            query="query_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].scope
        mock_val = datacatalog.SearchCatalogRequest.Scope(
            include_org_ids=["include_org_ids_value"]
        )
        assert arg == mock_val
        arg = args[0].query
        mock_val = "query_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_search_catalog_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        # A request object together with flattened kwargs is rejected.
        await client.search_catalog(
            datacatalog.SearchCatalogRequest(),
            scope=datacatalog.SearchCatalogRequest.Scope(
                include_org_ids=["include_org_ids_value"]
            ),
            query="query_value",
        )
def test_search_catalog_pager(transport_name: str = "grpc"):
    """Iterating the sync pager yields every result across all mocked pages."""
    client = DataCatalogClient(
        # FIX: instantiate the credentials; the original passed the class itself.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.SearchCatalogResponse(
                results=[
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                ],
                next_page_token="abc",
            ),
            datacatalog.SearchCatalogResponse(results=[], next_page_token="def",),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(),], next_page_token="ghi",
            ),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(), search.SearchCatalogResult(),],
            ),
            RuntimeError,
        )

        metadata = ()
        pager = client.search_catalog(request={})

        assert pager._metadata == metadata

        # 3 + 0 + 1 + 2 results across the four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, search.SearchCatalogResult) for i in results)
def test_search_catalog_pages(transport_name: str = "grpc"):
    """The sync pager exposes raw pages with their next_page_token values."""
    client = DataCatalogClient(
        # FIX: instantiate the credentials; the original passed the class itself.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.SearchCatalogResponse(
                results=[
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                ],
                next_page_token="abc",
            ),
            datacatalog.SearchCatalogResponse(results=[], next_page_token="def",),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(),], next_page_token="ghi",
            ),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(), search.SearchCatalogResult(),],
            ),
            RuntimeError,
        )
        pages = list(client.search_catalog(request={}).pages)
        # The final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_catalog_async_pager():
    """Async-iterating the pager yields every result across all mocked pages."""
    # FIX: instantiate the credentials; the original passed the class itself.
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_catalog), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.SearchCatalogResponse(
                results=[
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                ],
                next_page_token="abc",
            ),
            datacatalog.SearchCatalogResponse(results=[], next_page_token="def",),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(),], next_page_token="ghi",
            ),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(), search.SearchCatalogResult(),],
            ),
            RuntimeError,
        )
        async_pager = await client.search_catalog(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        # 3 + 0 + 1 + 2 results across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, search.SearchCatalogResult) for i in responses)
@pytest.mark.asyncio
async def test_search_catalog_async_pages():
    """The async pager exposes raw pages with their next_page_token values."""
    # FIX: instantiate the credentials; the original passed the class itself.
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_catalog), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.SearchCatalogResponse(
                results=[
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                    search.SearchCatalogResult(),
                ],
                next_page_token="abc",
            ),
            datacatalog.SearchCatalogResponse(results=[], next_page_token="def",),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(),], next_page_token="ghi",
            ),
            datacatalog.SearchCatalogResponse(
                results=[search.SearchCatalogResult(), search.SearchCatalogResult(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_catalog(request={})).pages:
            pages.append(page_)
        # The final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datacatalog.CreateEntryGroupRequest, dict,])
def test_create_entry_group(request_type, transport: str = "grpc"):
    """create_entry_group issues one RPC and returns the EntryGroup."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.EntryGroup(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.create_entry_group(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryGroupRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.EntryGroup)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_create_entry_group_empty_call():
    """create_entry_group() with no arguments sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    stub_call = mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    )
    with stub_call as call:
        client.create_entry_group()
        call.assert_called()
        _, call_args, _ = call.mock_calls[0]
        assert call_args[0] == datacatalog.CreateEntryGroupRequest()
@pytest.mark.asyncio
async def test_create_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateEntryGroupRequest
):
    """Async create_entry_group issues one RPC and returns the EntryGroup."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.create_entry_group(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryGroupRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.EntryGroup)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_entry_group_async_from_dict():
    """Re-run the async create_entry_group test with a plain dict request."""
    await test_create_entry_group_async(request_type=dict)
def test_create_entry_group_field_headers():
    """Routing header x-goog-request-params carries the request's parent field."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryGroupRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        call.return_value = datacatalog.EntryGroup()
        client.create_entry_group(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entry_group_field_headers_async():
    """Async: routing header x-goog-request-params carries the parent field."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryGroupRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        await client.create_entry_group(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_entry_group_flattened():
    """Flattened args (parent, entry_group_id, entry_group) populate the request."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.EntryGroup()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_entry_group(
            parent="parent_value",
            entry_group_id="entry_group_id_value",
            entry_group=datacatalog.EntryGroup(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_group_id
        mock_val = "entry_group_id_value"
        assert arg == mock_val
        arg = args[0].entry_group
        mock_val = datacatalog.EntryGroup(name="name_value")
        assert arg == mock_val
def test_create_entry_group_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        # A request object together with flattened kwargs is rejected.
        client.create_entry_group(
            datacatalog.CreateEntryGroupRequest(),
            parent="parent_value",
            entry_group_id="entry_group_id_value",
            entry_group=datacatalog.EntryGroup(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_entry_group_flattened_async():
    """Async flattened args (parent, entry_group_id, entry_group) populate the request."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (The original also assigned a plain EntryGroup first, which was
        # immediately overwritten — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_entry_group(
            parent="parent_value",
            entry_group_id="entry_group_id_value",
            entry_group=datacatalog.EntryGroup(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_group_id
        mock_val = "entry_group_id_value"
        assert arg == mock_val
        arg = args[0].entry_group
        mock_val = datacatalog.EntryGroup(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_entry_group_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogAsyncClient(credentials=creds)
    # Both call styles at once is an error regardless of transport.
    with pytest.raises(ValueError):
        await client.create_entry_group(
            datacatalog.CreateEntryGroupRequest(),
            parent="parent_value",
            entry_group_id="entry_group_id_value",
            entry_group=datacatalog.EntryGroup(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [datacatalog.UpdateEntryGroupRequest, dict,])
def test_update_entry_group(request_type, transport: str = "grpc"):
    """UpdateEntryGroup forwards the request and unpacks the response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request (proto or dict) is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        # Canned response for the stub to hand back.
        stub.return_value = datacatalog.EntryGroup(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.update_entry_group(request)
        # The stub was invoked exactly once with a default-constructed
        # UpdateEntryGroupRequest built from our empty input.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.UpdateEntryGroupRequest()
        # The canned response fields come back intact.
        assert isinstance(response, datacatalog.EntryGroup)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.description == "description_value"
def test_update_entry_group_empty_call():
    """A call with no request and no flattened fields sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub method, then call with nothing at all.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        client.update_entry_group()
        stub.assert_called()
        # The request actually sent is an empty UpdateEntryGroupRequest.
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.UpdateEntryGroupRequest()
@pytest.mark.asyncio
async def test_update_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateEntryGroupRequest
):
    """Async UpdateEntryGroup forwards the request and unpacks the response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        # The async stub must return an awaitable, so wrap the canned
        # response in a fake unary-unary call.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.update_entry_group(request)
        # The stub was invoked with a default UpdateEntryGroupRequest.
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.UpdateEntryGroupRequest()
        # The canned response fields come back intact.
        assert isinstance(response, datacatalog.EntryGroup)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_entry_group_async_from_dict():
    # Re-run the async test with a plain dict standing in for the proto
    # request type, exercising the dict->proto coercion path.
    await test_update_entry_group_async(request_type=dict)
def test_update_entry_group_field_headers():
    """The routed field must be propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must also travel as a
    # routing header, so populate the routed field with a sentinel.
    request = datacatalog.UpdateEntryGroupRequest()
    request.entry_group.name = "entry_group.name/value"
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        stub.return_value = datacatalog.EntryGroup()
        client.update_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert (
            "x-goog-request-params",
            "entry_group.name=entry_group.name/value",
        ) in kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_entry_group_field_headers_async():
    """Async client must also attach the x-goog-request-params routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the routed field with a sentinel value.
    request = datacatalog.UpdateEntryGroupRequest()
    request.entry_group.name = "entry_group.name/value"
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        # Awaitable canned response for the async stub.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        await client.update_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert (
            "x-goog-request-params",
            "entry_group.name=entry_group.name/value",
        ) in kwargs["metadata"]
def test_update_entry_group_flattened():
    """Flattened kwargs must be copied into the assembled UpdateEntryGroupRequest."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as stub:
        stub.return_value = datacatalog.EntryGroup()
        # Invoke the method using only flattened (keyword) fields.
        client.update_entry_group(
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one RPC was issued, and the request object it carried
        # contains each flattened value.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        sent_request = posargs[0]
        assert sent_request.entry_group == datacatalog.EntryGroup(name="name_value")
        assert sent_request.update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_entry_group_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    # Supplying both call styles at once is ambiguous; the client raises.
    with pytest.raises(ValueError):
        client.update_entry_group(
            datacatalog.UpdateEntryGroupRequest(),
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_entry_group_flattened_async():
    """Flattened kwargs must reach the request object on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-EntryGroup assignment that was immediately overwritten has
        # been removed; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_entry_group(
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].entry_group
        mock_val = datacatalog.EntryGroup(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_entry_group_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogAsyncClient(credentials=creds)
    # Both call styles at once is an error regardless of transport.
    with pytest.raises(ValueError):
        await client.update_entry_group(
            datacatalog.UpdateEntryGroupRequest(),
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [datacatalog.GetEntryGroupRequest, dict,])
def test_get_entry_group(request_type, transport: str = "grpc"):
    """GetEntryGroup forwards the request and unpacks the response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request (proto or dict) is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        # Canned response for the stub to hand back.
        stub.return_value = datacatalog.EntryGroup(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.get_entry_group(request)
        # The stub was invoked exactly once with a default-constructed
        # GetEntryGroupRequest built from our empty input.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.GetEntryGroupRequest()
        # The canned response fields come back intact.
        assert isinstance(response, datacatalog.EntryGroup)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.description == "description_value"
def test_get_entry_group_empty_call():
    """A call with no request and no flattened fields sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub method, then call with nothing at all.
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        client.get_entry_group()
        stub.assert_called()
        # The request actually sent is an empty GetEntryGroupRequest.
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.GetEntryGroupRequest()
@pytest.mark.asyncio
async def test_get_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.GetEntryGroupRequest
):
    """Async GetEntryGroup forwards the request and unpacks the response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        # The async stub must return an awaitable, so wrap the canned
        # response in a fake unary-unary call.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.get_entry_group(request)
        # The stub was invoked with a default GetEntryGroupRequest.
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.GetEntryGroupRequest()
        # The canned response fields come back intact.
        assert isinstance(response, datacatalog.EntryGroup)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_entry_group_async_from_dict():
    # Re-run the async test with a plain dict standing in for the proto
    # request type, exercising the dict->proto coercion path.
    await test_get_entry_group_async(request_type=dict)
def test_get_entry_group_field_headers():
    """The routed field must be propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must also travel as a
    # routing header, so populate the routed field with a sentinel.
    request = datacatalog.GetEntryGroupRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        stub.return_value = datacatalog.EntryGroup()
        client.get_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_entry_group_field_headers_async():
    """Async client must also attach the x-goog-request-params routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the routed field with a sentinel value.
    request = datacatalog.GetEntryGroupRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        # Awaitable canned response for the async stub.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        await client.get_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
def test_get_entry_group_flattened():
    """Flattened kwargs must be copied into the assembled GetEntryGroupRequest."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as stub:
        stub.return_value = datacatalog.EntryGroup()
        # Invoke the method using only flattened (keyword) fields.
        client.get_entry_group(
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one RPC was issued, and the request object it carried
        # contains each flattened value.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        sent_request = posargs[0]
        assert sent_request.name == "name_value"
        assert sent_request.read_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_get_entry_group_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    # Supplying both call styles at once is ambiguous; the client raises.
    with pytest.raises(ValueError):
        client.get_entry_group(
            datacatalog.GetEntryGroupRequest(),
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_get_entry_group_flattened_async():
    """Flattened kwargs must reach the request object on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-EntryGroup assignment that was immediately overwritten has
        # been removed; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_entry_group(
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].read_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_entry_group_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogAsyncClient(credentials=creds)
    # Both call styles at once is an error regardless of transport.
    with pytest.raises(ValueError):
        await client.get_entry_group(
            datacatalog.GetEntryGroupRequest(),
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [datacatalog.DeleteEntryGroupRequest, dict,])
def test_delete_entry_group(request_type, transport: str = "grpc"):
    """DeleteEntryGroup forwards the request and returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request (proto or dict) is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        # Delete RPCs return google.protobuf.Empty, surfaced as None.
        stub.return_value = None
        response = client.delete_entry_group(request)
        # The stub was invoked exactly once with a default-constructed
        # DeleteEntryGroupRequest built from our empty input.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.DeleteEntryGroupRequest()
        # The method surfaces no payload.
        assert response is None
def test_delete_entry_group_empty_call():
    """A call with no request and no flattened fields sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub method, then call with nothing at all.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        client.delete_entry_group()
        stub.assert_called()
        # The request actually sent is an empty DeleteEntryGroupRequest.
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.DeleteEntryGroupRequest()
@pytest.mark.asyncio
async def test_delete_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteEntryGroupRequest
):
    """Async DeleteEntryGroup forwards the request and returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        # The async stub must return an awaitable yielding None.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_entry_group(request)
        # The stub was invoked with a default DeleteEntryGroupRequest.
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.DeleteEntryGroupRequest()
        # The method surfaces no payload.
        assert response is None
@pytest.mark.asyncio
async def test_delete_entry_group_async_from_dict():
    # Re-run the async test with a plain dict standing in for the proto
    # request type, exercising the dict->proto coercion path.
    await test_delete_entry_group_async(request_type=dict)
def test_delete_entry_group_field_headers():
    """The routed field must be propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must also travel as a
    # routing header, so populate the routed field with a sentinel.
    request = datacatalog.DeleteEntryGroupRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        stub.return_value = None
        client.delete_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_entry_group_field_headers_async():
    """Async client must also attach the x-goog-request-params routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the routed field with a sentinel value.
    request = datacatalog.DeleteEntryGroupRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        # Awaitable canned response (delete yields no payload).
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_entry_group(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
def test_delete_entry_group_flattened():
    """Flattened kwargs must be copied into the assembled DeleteEntryGroupRequest."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as stub:
        stub.return_value = None
        # Invoke the method using only the flattened `name` field.
        client.delete_entry_group(name="name_value",)
        # Exactly one RPC was issued, and the request object it carried
        # contains the flattened value.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0].name == "name_value"
def test_delete_entry_group_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    # Supplying both call styles at once is ambiguous; the client raises.
    with pytest.raises(ValueError):
        client.delete_entry_group(
            datacatalog.DeleteEntryGroupRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_entry_group_flattened_async():
    """Flattened kwargs must reach the request object on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # `call.return_value = None` that was immediately overwritten has
        # been removed; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_entry_group(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_entry_group_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogAsyncClient(credentials=creds)
    # Both call styles at once is an error regardless of transport.
    with pytest.raises(ValueError):
        await client.delete_entry_group(
            datacatalog.DeleteEntryGroupRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [datacatalog.ListEntryGroupsRequest, dict,])
def test_list_entry_groups(request_type, transport: str = "grpc"):
    """ListEntryGroups forwards the request and wraps the response in a pager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request (proto or dict) is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        # Canned single-page response for the stub to hand back.
        stub.return_value = datacatalog.ListEntryGroupsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_entry_groups(request)
        # The stub was invoked exactly once with a default-constructed
        # ListEntryGroupsRequest built from our empty input.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.ListEntryGroupsRequest()
        # The response is wrapped in a pager exposing the page token.
        assert isinstance(response, pagers.ListEntryGroupsPager)
        assert response.next_page_token == "next_page_token_value"
def test_list_entry_groups_empty_call():
    """A call with no request and no flattened fields sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub method, then call with nothing at all.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        client.list_entry_groups()
        stub.assert_called()
        # The request actually sent is an empty ListEntryGroupsRequest.
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.ListEntryGroupsRequest()
@pytest.mark.asyncio
async def test_list_entry_groups_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListEntryGroupsRequest
):
    """Async ListEntryGroups forwards the request and wraps it in an async pager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        # The async stub must return an awaitable, so wrap the canned
        # single-page response in a fake unary-unary call.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_entry_groups(request)
        # The stub was invoked with a default ListEntryGroupsRequest.
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == datacatalog.ListEntryGroupsRequest()
        # The response is wrapped in an async pager exposing the page token.
        assert isinstance(response, pagers.ListEntryGroupsAsyncPager)
        assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entry_groups_async_from_dict():
    # Re-run the async test with a plain dict standing in for the proto
    # request type, exercising the dict->proto coercion path.
    await test_list_entry_groups_async(request_type=dict)
def test_list_entry_groups_field_headers():
    """The routed field must be propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must also travel as a
    # routing header, so populate the routed field with a sentinel.
    request = datacatalog.ListEntryGroupsRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        stub.return_value = datacatalog.ListEntryGroupsResponse()
        client.list_entry_groups(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_entry_groups_field_headers_async():
    """Async client must also attach the x-goog-request-params routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the routed field with a sentinel value.
    request = datacatalog.ListEntryGroupsRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        # Awaitable canned response for the async stub.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse()
        )
        await client.list_entry_groups(request)
        # The stub received exactly the request we built...
        assert len(stub.mock_calls)
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0] == request
        # ...and the call metadata carries the encoded routing parameter.
        _, _, kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kwargs["metadata"]
def test_list_entry_groups_flattened():
    """Flattened kwargs must be copied into the assembled ListEntryGroupsRequest."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as stub:
        stub.return_value = datacatalog.ListEntryGroupsResponse()
        # Invoke the method using only the flattened `parent` field.
        client.list_entry_groups(parent="parent_value",)
        # Exactly one RPC was issued, and the request object it carried
        # contains the flattened value.
        assert len(stub.mock_calls) == 1
        _, posargs, _ = stub.mock_calls[0]
        assert posargs[0].parent == "parent_value"
def test_list_entry_groups_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogClient(credentials=creds)
    # Supplying both call styles at once is ambiguous; the client raises.
    with pytest.raises(ValueError):
        client.list_entry_groups(
            datacatalog.ListEntryGroupsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_entry_groups_flattened_async():
    """Flattened kwargs must reach the request object on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-response assignment that was immediately overwritten has
        # been removed; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_entry_groups(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_entry_groups_flattened_error_async():
    """Async client also rejects a request object mixed with flattened fields."""
    creds = ga_credentials.AnonymousCredentials()
    client = DataCatalogAsyncClient(credentials=creds)
    # Both call styles at once is an error regardless of transport.
    with pytest.raises(ValueError):
        await client.list_entry_groups(
            datacatalog.ListEntryGroupsRequest(), parent="parent_value",
        )
def test_list_entry_groups_pager(transport_name: str = "grpc"):
    """Iterating the pager must walk every EntryGroup across all pages."""
    client = DataCatalogClient(
        # Fix: pass a credentials *instance*, not the class object, matching
        # every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 items); the
        # trailing RuntimeError guarantees the test fails loudly if the
        # pager requests more pages than provided.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[], next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(),], next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(), datacatalog.EntryGroup(),],
            ),
            RuntimeError,
        )
        # Expected routing metadata the pager attaches (empty parent field).
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_entry_groups(request={})
        assert pager._metadata == metadata
        # Consuming the pager yields every item from every page.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, datacatalog.EntryGroup) for i in results)
def test_list_entry_groups_pages(transport_name: str = "grpc"):
    """Verify the sync pager exposes raw pages with the expected page tokens."""
    client = DataCatalogClient(
        # Instantiate the credentials; previously the bare class was passed,
        # inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[], next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(),], next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(), datacatalog.EntryGroup(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_entry_groups(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entry_groups_async_pager():
    """Verify the async pager yields every entry group across all pages."""
    # Instantiate the credentials; previously the bare class was passed,
    # inconsistent with every other test in this file.
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[], next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(),], next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(), datacatalog.EntryGroup(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_entry_groups(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 entry groups across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, datacatalog.EntryGroup) for i in responses)
@pytest.mark.asyncio
async def test_list_entry_groups_async_pages():
    """Verify the async pager exposes raw pages with the expected page tokens."""
    # Instantiate the credentials; previously the bare class was passed,
    # inconsistent with every other test in this file.
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[], next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(),], next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[datacatalog.EntryGroup(), datacatalog.EntryGroup(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_entry_groups(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datacatalog.CreateEntryRequest, dict,])
def test_create_entry(request_type, transport: str = "grpc"):
    """Verify create_entry sends the expected request and unpacks the response.

    Parametrized over a proto request object and a plain dict.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
        )
        response = client.create_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_create_entry_empty_call():
    """Verify create_entry works with no arguments and sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        client.create_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryRequest()
@pytest.mark.asyncio
async def test_create_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateEntryRequest
):
    """Verify the async create_entry sends the expected request and unpacks
    the awaited response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.create_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_entry_async_from_dict():
    """Run the async create_entry test with a dict request body."""
    await test_create_entry_async(request_type=dict)
def test_create_entry_field_headers():
    """Verify create_entry attaches the x-goog-request-params routing header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.create_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entry_field_headers_async():
    """Verify the async create_entry attaches the routing field header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.create_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_entry_flattened():
    """Verify flattened keyword args populate the create_entry request."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_entry(
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_id
        mock_val = "entry_id_value"
        assert arg == mock_val
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
def test_create_entry_flattened_error():
    """Verify create_entry rejects a request object mixed with flattened args."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_entry(
            datacatalog.CreateEntryRequest(),
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_entry_flattened_async():
    """Verify flattened keyword args populate the request on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so the response must be wrapped in a fake
        # call object; a redundant plain assignment was removed here.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_entry(
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_id
        mock_val = "entry_id_value"
        assert arg == mock_val
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_entry_flattened_error_async():
    """Verify the async create_entry rejects a request object mixed with
    flattened args."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_entry(
            datacatalog.CreateEntryRequest(),
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [datacatalog.UpdateEntryRequest, dict,])
def test_update_entry(request_type, transport: str = "grpc"):
    """Verify update_entry sends the expected request and unpacks the response.

    Parametrized over a proto request object and a plain dict.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
        )
        response = client.update_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_update_entry_empty_call():
    """Verify update_entry works with no arguments and sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        client.update_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()
@pytest.mark.asyncio
async def test_update_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateEntryRequest
):
    """Verify the async update_entry sends the expected request and unpacks
    the awaited response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.update_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_entry_async_from_dict():
    """Run the async update_entry test with a dict request body."""
    await test_update_entry_async(request_type=dict)
def test_update_entry_field_headers():
    """Verify update_entry attaches the x-goog-request-params routing header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateEntryRequest()
    request.entry.name = "entry.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.update_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entry.name=entry.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_entry_field_headers_async():
    """Verify the async update_entry attaches the routing field header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateEntryRequest()
    request.entry.name = "entry.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.update_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entry.name=entry.name/value",) in kw["metadata"]
def test_update_entry_flattened():
    """Verify flattened keyword args populate the update_entry request."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_entry(
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_entry_flattened_error():
    """Verify update_entry rejects a request object mixed with flattened args."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_entry(
            datacatalog.UpdateEntryRequest(),
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_entry_flattened_async():
    """Verify flattened keyword args populate the request on the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so the response must be wrapped in a fake
        # call object; a redundant plain assignment was removed here.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_entry(
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_entry_flattened_error_async():
    """Verify the async update_entry rejects a request object mixed with
    flattened args."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_entry(
            datacatalog.UpdateEntryRequest(),
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [datacatalog.DeleteEntryRequest, dict,])
def test_delete_entry(request_type, transport: str = "grpc"):
    """Verify delete_entry sends the expected request and returns None.

    Parametrized over a proto request object and a plain dict.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_entry_empty_call():
    """Verify delete_entry works with no arguments and sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        client.delete_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()
@pytest.mark.asyncio
async def test_delete_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteEntryRequest
):
    """Verify the async delete_entry sends the expected request and returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_entry_async_from_dict():
    """Run the async delete_entry test with a dict request body."""
    await test_delete_entry_async(request_type=dict)
def test_delete_entry_field_headers():
    """Verify delete_entry attaches the x-goog-request-params routing header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteEntryRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        call.return_value = None
        client.delete_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_entry_field_headers_async():
    """Verify the async delete_entry attaches the routing field header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteEntryRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_entry_flattened():
    """Verify the flattened `name` keyword populates the delete_entry request."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_entry(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_entry_flattened_error():
    """Verify delete_entry rejects a request object mixed with flattened args."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_entry(
            datacatalog.DeleteEntryRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_entry_flattened_async():
    """Verify the flattened `name` keyword populates the request on the
    async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so the response must be wrapped in a fake
        # call object; a redundant plain assignment was removed here.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_entry(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_entry_flattened_error_async():
    """Verify the async delete_entry rejects a request object mixed with
    flattened args."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_entry(
            datacatalog.DeleteEntryRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [datacatalog.GetEntryRequest, dict,])
def test_get_entry(request_type, transport: str = "grpc"):
    """Verify get_entry sends the expected request and unpacks the response.

    Parametrized over a proto request object and a plain dict.
    """
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
        )
        response = client.get_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_get_entry_empty_call():
    """Verify get_entry works with no arguments and sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        client.get_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()
@pytest.mark.asyncio
async def test_get_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.GetEntryRequest
):
    """Verify the async get_entry sends the expected request and unpacks
    the awaited response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.get_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_entry_async_from_dict():
    """Run the async get_entry test with a dict request body."""
    await test_get_entry_async(request_type=dict)
def test_get_entry_field_headers():
    """Verify get_entry attaches the x-goog-request-params routing header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.GetEntryRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.get_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_entry_field_headers_async():
    """Verify the async get_entry attaches the routing field header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.GetEntryRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.get_entry(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_entry_flattened():
    """Verify the flattened `name` keyword populates the get_entry request."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_entry(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_entry_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.GetEntryRequest()
    with pytest.raises(ValueError):
        client.get_entry(bad_request, name="name_value")
@pytest.mark.asyncio
async def test_get_entry_flattened_async():
    """Async variant: the flattened `name` keyword is folded into the request."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # sync-style `call.return_value = datacatalog.Entry()` that was
        # immediately overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_entry(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_entry_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.GetEntryRequest()
    with pytest.raises(ValueError):
        await client.get_entry(bad_request, name="name_value")
@pytest.mark.parametrize("request_type", [datacatalog.LookupEntryRequest, dict,])
def test_lookup_entry(request_type, transport: str = "grpc"):
    """LookupEntry sends the request through the stub and returns the Entry."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as rpc:
        # Canned response for the mocked stub.
        rpc.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
        )
        response = client.lookup_entry(request)
        # Exactly one RPC, carrying the canonical request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.LookupEntryRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_lookup_entry_empty_call():
    """A call with neither request nor flattened fields still sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable and invoke the method with no args.
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as rpc:
        client.lookup_entry()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.LookupEntryRequest()
@pytest.mark.asyncio
async def test_lookup_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.LookupEntryRequest
):
    """Async LookupEntry routes the request through the stub and returns the Entry."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as rpc:
        # The async stub must return an awaitable.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.lookup_entry(request)
        # The stub was invoked with the canonical request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.LookupEntryRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_lookup_entry_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_lookup_entry_async(request_type=dict)
@pytest.mark.parametrize("request_type", [datacatalog.ListEntriesRequest, dict,])
def test_list_entries(request_type, transport: str = "grpc"):
    """ListEntries sends the request through the stub and returns a pager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        # Canned response for the mocked stub.
        rpc.return_value = datacatalog.ListEntriesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_entries(request)
        # Exactly one RPC, carrying the canonical request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.ListEntriesRequest()
    # The result is wrapped in a pager exposing the token.
    assert isinstance(response, pagers.ListEntriesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_entries_empty_call():
    """A call with neither request nor flattened fields still sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable and invoke the method with no args.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        client.list_entries()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.ListEntriesRequest()
@pytest.mark.asyncio
async def test_list_entries_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListEntriesRequest
):
    """Async ListEntries routes the request through the stub and returns an async pager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        # The async stub must return an awaitable.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_entries(request)
        # The stub was invoked with the canonical request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.ListEntriesRequest()
    # The result is wrapped in an async pager exposing the token.
    assert isinstance(response, pagers.ListEntriesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entries_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_list_entries_async(request_type=dict)
def test_list_entries_field_headers():
    """The request `parent` must surface as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.ListEntriesRequest()
    request.parent = "parent/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        rpc.return_value = datacatalog.ListEntriesResponse()
        client.list_entries(request)
        # Exactly one RPC, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_entries_field_headers_async():
    """Async variant: the request `parent` must surface as a routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.ListEntriesRequest()
    request.parent = "parent/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse()
        )
        await client.list_entries(request)
        # The stub was invoked, carrying our request object.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_list_entries_flattened():
    """The flattened `parent` keyword must be folded into the request message."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as rpc:
        # Give the mocked stub something to return.
        rpc.return_value = datacatalog.ListEntriesResponse()
        # Use keyword arguments only (no request object).
        client.list_entries(parent="parent_value",)
        # Exactly one RPC went out and its request carries the flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_entries_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.ListEntriesRequest()
    with pytest.raises(ValueError):
        client.list_entries(bad_request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_entries_flattened_async():
    """Async variant: the flattened `parent` keyword is folded into the request."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # sync-style `call.return_value = datacatalog.ListEntriesResponse()`
        # that was immediately overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_entries(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_entries_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.ListEntriesRequest()
    with pytest.raises(ValueError):
        await client.list_entries(bad_request, parent="parent_value")
def test_list_entries_pager(transport_name: str = "grpc"):
    """The sync pager iterates transparently across all returned pages."""
    client = DataCatalogClient(
        # Fix: instantiate the credentials (the bare class was passed before,
        # inconsistent with every other test in this file).
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(entries=[], next_page_token="def",),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(),], next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(), datacatalog.Entry(),],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_entries(request={})
        assert pager._metadata == metadata
        # list(...) replaces the identity comprehension [i for i in pager].
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, datacatalog.Entry) for i in results)
def test_list_entries_pages(transport_name: str = "grpc"):
    """`pages` yields raw responses, each carrying its own next_page_token."""
    client = DataCatalogClient(
        # Fix: instantiate the credentials (the bare class was passed before,
        # inconsistent with every other test in this file).
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(entries=[], next_page_token="def",),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(),], next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(), datacatalog.Entry(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_entries(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entries_async_pager():
    """The async pager iterates transparently across all returned pages."""
    client = DataCatalogAsyncClient(
        # Fix: instantiate the credentials (the bare class was passed before,
        # inconsistent with every other test in this file).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entries), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(entries=[], next_page_token="def",),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(),], next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(), datacatalog.Entry(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_entries(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, datacatalog.Entry) for i in responses)
@pytest.mark.asyncio
async def test_list_entries_async_pages():
    """Async `pages` yields raw responses, each carrying its own next_page_token."""
    client = DataCatalogAsyncClient(
        # Fix: instantiate the credentials (the bare class was passed before,
        # inconsistent with every other test in this file).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entries), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(entries=[], next_page_token="def",),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(),], next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[datacatalog.Entry(), datacatalog.Entry(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_entries(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datacatalog.CreateTagTemplateRequest, dict,])
def test_create_tag_template(request_type, transport: str = "grpc"):
    """CreateTagTemplate sends the request through the stub and returns the template."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        # Canned response for the mocked stub.
        rpc.return_value = tags.TagTemplate(
            name="name_value", display_name="display_name_value",
        )
        response = client.create_tag_template(request)
        # Exactly one RPC, carrying the canonical request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
def test_create_tag_template_empty_call():
    """A call with neither request nor flattened fields still sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable and invoke the method with no args.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        client.create_tag_template()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateRequest()
@pytest.mark.asyncio
async def test_create_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateTagTemplateRequest
):
    """Async CreateTagTemplate routes the request through the stub."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        # The async stub must return an awaitable.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplate(name="name_value", display_name="display_name_value",)
        )
        response = await client.create_tag_template(request)
        # The stub was invoked with the canonical request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_create_tag_template_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_create_tag_template_async(request_type=dict)
def test_create_tag_template_field_headers():
    """The request `parent` must surface as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.CreateTagTemplateRequest()
    request.parent = "parent/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        rpc.return_value = tags.TagTemplate()
        client.create_tag_template(request)
        # Exactly one RPC, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_tag_template_field_headers_async():
    """Async variant: the request `parent` must surface as a routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.CreateTagTemplateRequest()
    request.parent = "parent/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        await client.create_tag_template(request)
        # The stub was invoked, carrying our request object.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_create_tag_template_flattened():
    """All three flattened keywords must be folded into the request message."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as rpc:
        # Give the mocked stub something to return.
        rpc.return_value = tags.TagTemplate()
        # Use keyword arguments only (no request object).
        client.create_tag_template(
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
        # Exactly one RPC went out and its request carries every flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent = call_args[0]
        assert sent.parent == "parent_value"
        assert sent.tag_template_id == "tag_template_id_value"
        assert sent.tag_template == tags.TagTemplate(name="name_value")
def test_create_tag_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.CreateTagTemplateRequest()
    with pytest.raises(ValueError):
        client.create_tag_template(
            bad_request,
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_template_flattened_async():
    """Async variant: the flattened keywords are folded into the request."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # sync-style `call.return_value = tags.TagTemplate()` that was
        # immediately overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag_template(
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag_template_id
        mock_val = "tag_template_id_value"
        assert arg == mock_val
        arg = args[0].tag_template
        mock_val = tags.TagTemplate(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_template_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.CreateTagTemplateRequest()
    with pytest.raises(ValueError):
        await client.create_tag_template(
            bad_request,
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [datacatalog.GetTagTemplateRequest, dict,])
def test_get_tag_template(request_type, transport: str = "grpc"):
    """GetTagTemplate sends the request through the stub and returns the template."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        # Canned response for the mocked stub.
        rpc.return_value = tags.TagTemplate(
            name="name_value", display_name="display_name_value",
        )
        response = client.get_tag_template(request)
        # Exactly one RPC, carrying the canonical request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
def test_get_tag_template_empty_call():
    """A call with neither request nor flattened fields still sends a default request."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable and invoke the method with no args.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        client.get_tag_template()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
@pytest.mark.asyncio
async def test_get_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.GetTagTemplateRequest
):
    """Async GetTagTemplate routes the request through the stub."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        # The async stub must return an awaitable.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplate(name="name_value", display_name="display_name_value",)
        )
        response = await client.get_tag_template(request)
        # The stub was invoked with the canonical request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_tag_template_async_from_dict():
    """Exercise the async path with the request supplied as a plain dict."""
    await test_get_tag_template_async(request_type=dict)
def test_get_tag_template_field_headers():
    """The request `name` must surface as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.GetTagTemplateRequest()
    request.name = "name/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        rpc.return_value = tags.TagTemplate()
        client.get_tag_template(request)
        # Exactly one RPC, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_tag_template_field_headers_async():
    """Async variant: the request `name` must surface as a routing header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-derived field so the routing header is non-trivial.
    request = datacatalog.GetTagTemplateRequest()
    request.name = "name/value"
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        await client.get_tag_template(request)
        # The stub was invoked, carrying our request object.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_get_tag_template_flattened():
    """The flattened `name` keyword must be folded into the request message."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as rpc:
        # Give the mocked stub something to return.
        rpc.return_value = tags.TagTemplate()
        # Use keyword arguments only (no request object).
        client.get_tag_template(name="name_value",)
        # Exactly one RPC went out and its request carries the flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_get_tag_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.GetTagTemplateRequest()
    with pytest.raises(ValueError):
        client.get_tag_template(bad_request, name="name_value")
@pytest.mark.asyncio
async def test_get_tag_template_flattened_async():
    """Async variant: the flattened `name` keyword is folded into the request."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # sync-style `call.return_value = tags.TagTemplate()` that was
        # immediately overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_tag_template(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tag_template_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    bad_request = datacatalog.GetTagTemplateRequest()
    with pytest.raises(ValueError):
        await client.get_tag_template(bad_request, name="name_value")
@pytest.mark.parametrize("request_type", [datacatalog.UpdateTagTemplateRequest, dict,])
def test_update_tag_template(request_type, transport: str = "grpc"):
    """UpdateTagTemplate sends the request through the stub and returns the template."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is valid: proto3 treats every field as optional.
    request = request_type()
    # Patch the transport-level callable and invoke the method.
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as rpc:
        # Canned response for the mocked stub.
        rpc.return_value = tags.TagTemplate(
            name="name_value", display_name="display_name_value",
        )
        response = client.update_tag_template(request)
        # Exactly one RPC, carrying the canonical request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagTemplateRequest()
    # The canned response surfaces unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
def test_update_tag_template_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tag_template), "__call__"
) as call:
client.update_tag_template()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.UpdateTagTemplateRequest()
@pytest.mark.asyncio
async def test_update_tag_template_async(
transport: str = "grpc_asyncio", request_type=datacatalog.UpdateTagTemplateRequest
):
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tag_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tags.TagTemplate(name="name_value", display_name="display_name_value",)
)
response = await client.update_tag_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.UpdateTagTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tags.TagTemplate)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_update_tag_template_async_from_dict():
await test_update_tag_template_async(request_type=dict)
def test_update_tag_template_field_headers():
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datacatalog.UpdateTagTemplateRequest()
request.tag_template.name = "tag_template.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tag_template), "__call__"
) as call:
call.return_value = tags.TagTemplate()
client.update_tag_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tag_template.name=tag_template.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tag_template_field_headers_async():
client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datacatalog.UpdateTagTemplateRequest()
request.tag_template.name = "tag_template.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tag_template), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
await client.update_tag_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tag_template.name=tag_template.name/value",
) in kw["metadata"]
def test_update_tag_template_flattened():
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tag_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tags.TagTemplate()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_tag_template(
tag_template=tags.TagTemplate(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tag_template
mock_val = tags.TagTemplate(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_tag_template_flattened_error():
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tag_template(
datacatalog.UpdateTagTemplateRequest(),
tag_template=tags.TagTemplate(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tag_template_flattened_async():
    """Verify flattened keyword arguments populate the request for the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (The previous plain TagTemplate assignment was dead code — it was
        # immediately overwritten by this awaitable wrapper.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.update_tag_template(
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_template
        mock_val = tags.TagTemplate(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_template_flattened_error_async():
    """Verify the async client rejects mixing a request object with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_tag_template(
            datacatalog.UpdateTagTemplateRequest(),
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [datacatalog.DeleteTagTemplateRequest, dict,])
def test_delete_tag_template(request_type, transport: str = "grpc"):
    """Verify delete_tag_template sends the request and returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_tag_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteTagTemplateRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_tag_template_empty_call():
    """Verify calling delete_tag_template with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        client.delete_tag_template()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteTagTemplateRequest()
@pytest.mark.asyncio
async def test_delete_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteTagTemplateRequest
):
    """Verify async delete_tag_template sends the request and returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteTagTemplateRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tag_template_async_from_dict():
    """Run the async delete_tag_template test with a dict-typed request."""
    await test_delete_tag_template_async(request_type=dict)
def test_delete_tag_template_field_headers():
    """Verify the routing field header is sent for delete_tag_template."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteTagTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        call.return_value = None
        client.delete_tag_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_template_field_headers_async():
    """Verify the routing field header is sent for async delete_tag_template."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteTagTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tag_template_flattened():
    """Verify flattened keyword arguments populate the request fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tag_template(
            name="name_value", force=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].force
        mock_val = True
        assert arg == mock_val
def test_delete_tag_template_flattened_error():
    """Verify the client rejects mixing a request object with flattened fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_tag_template(
            datacatalog.DeleteTagTemplateRequest(), name="name_value", force=True,
        )
@pytest.mark.asyncio
async def test_delete_tag_template_flattened_async():
    """Verify flattened keyword arguments populate the request for the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (The previous `call.return_value = None` was dead code — it was
        # immediately overwritten by this awaitable wrapper.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.delete_tag_template(name="name_value", force=True,)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].force
        mock_val = True
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_template_flattened_error_async():
    """Verify the async client rejects mixing a request object with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_tag_template(
            datacatalog.DeleteTagTemplateRequest(), name="name_value", force=True,
        )
@pytest.mark.parametrize(
    "request_type", [datacatalog.CreateTagTemplateFieldRequest, dict,]
)
def test_create_tag_template_field(request_type, transport: str = "grpc"):
    """Verify create_tag_template_field sends the request and returns a TagTemplateField."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            order=540,
        )
        response = client.create_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateTagTemplateFieldRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.order == 540
def test_create_tag_template_field_empty_call():
    """Verify calling create_tag_template_field with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        client.create_tag_template_field()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_create_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.CreateTagTemplateFieldRequest,
):
    """Verify async create_tag_template_field sends the request and returns a TagTemplateField."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                order=540,
            )
        )
        response = await client.create_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateTagTemplateFieldRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.order == 540
@pytest.mark.asyncio
async def test_create_tag_template_field_async_from_dict():
    """Run the async create_tag_template_field test with a dict-typed request."""
    await test_create_tag_template_field_async(request_type=dict)
def test_create_tag_template_field_field_headers():
    """Verify the routing field header is sent for create_tag_template_field."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateTagTemplateFieldRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        call.return_value = tags.TagTemplateField()
        client.create_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tag_template_field_field_headers_async():
    """Verify the routing field header is sent for async create_tag_template_field."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateTagTemplateFieldRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.create_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tag_template_field_flattened():
    """Verify flattened keyword arguments populate the request fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.TagTemplateField()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tag_template_field(
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag_template_field_id
        mock_val = "tag_template_field_id_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
def test_create_tag_template_field_flattened_error():
    """Verify the client rejects mixing a request object with flattened fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_tag_template_field(
            datacatalog.CreateTagTemplateFieldRequest(),
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_template_field_flattened_async():
    """Verify flattened keyword arguments populate the request for the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (The previous plain TagTemplateField assignment was dead code — it
        # was immediately overwritten by this awaitable wrapper.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.create_tag_template_field(
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag_template_field_id
        mock_val = "tag_template_field_id_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_template_field_flattened_error_async():
    """Verify the async client rejects mixing a request object with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_tag_template_field(
            datacatalog.CreateTagTemplateFieldRequest(),
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type", [datacatalog.UpdateTagTemplateFieldRequest, dict,]
)
def test_update_tag_template_field(request_type, transport: str = "grpc"):
    """Verify update_tag_template_field sends the request and returns a TagTemplateField."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            order=540,
        )
        response = client.update_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateTagTemplateFieldRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.order == 540
def test_update_tag_template_field_empty_call():
    """Verify calling update_tag_template_field with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        client.update_tag_template_field()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_update_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.UpdateTagTemplateFieldRequest,
):
    """Verify async update_tag_template_field sends the request and returns a TagTemplateField."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                order=540,
            )
        )
        response = await client.update_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateTagTemplateFieldRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.order == 540
@pytest.mark.asyncio
async def test_update_tag_template_field_async_from_dict():
    """Run the async update_tag_template_field test with a dict-typed request."""
    await test_update_tag_template_field_async(request_type=dict)
def test_update_tag_template_field_field_headers():
    """Verify the routing field header is sent for update_tag_template_field."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        call.return_value = tags.TagTemplateField()
        client.update_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tag_template_field_field_headers_async():
    """Verify the routing field header is sent for async update_tag_template_field."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.update_tag_template_field(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_update_tag_template_field_flattened():
    """Verify flattened keyword arguments populate the request fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.TagTemplateField()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_tag_template_field(
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_tag_template_field_flattened_error():
    """Verify the client rejects mixing a request object with flattened fields."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_tag_template_field(
            datacatalog.UpdateTagTemplateFieldRequest(),
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_template_field_flattened_async():
    """Verify flattened keyword arguments populate the request for the async client."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (The previous plain TagTemplateField assignment was dead code — it
        # was immediately overwritten by this awaitable wrapper.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.update_tag_template_field(
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_template_field_flattened_error_async():
client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tag_template_field(
datacatalog.UpdateTagTemplateFieldRequest(),
name="name_value",
tag_template_field=tags.TagTemplateField(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [datacatalog.RenameTagTemplateFieldRequest, dict,]
)
def test_rename_tag_template_field(request_type, transport: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rename_tag_template_field), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tags.TagTemplateField(
name="name_value",
display_name="display_name_value",
is_required=True,
order=540,
)
response = client.rename_tag_template_field(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.RenameTagTemplateFieldRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tags.TagTemplateField)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.is_required is True
assert response.order == 540
def test_rename_tag_template_field_empty_call():
    """Coverage failsafe: a bare call (no request, no kwargs) still works."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub so no real traffic is generated.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stub:
        client.rename_tag_template_field()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request proto should have been synthesized.
        assert call_args[0] == datacatalog.RenameTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_rename_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.RenameTagTemplateFieldRequest,
):
    """Verify the async client forwards the request and parses the reply."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub; the async surface expects an
    # awaitable, hence the FakeUnaryUnaryCall wrapper.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                order=540,
            )
        )
        response = await client.rename_tag_template_field(request)
        # The stub must have been invoked with the request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.RenameTagTemplateFieldRequest()
    # The canned reply must round-trip into the expected response type.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.order == 540
@pytest.mark.asyncio
async def test_rename_tag_template_field_async_from_dict():
    """Exercise the async path with a plain dict in place of the proto."""
    await test_rename_tag_template_field_async(request_type=dict)
def test_rename_tag_template_field_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.RenameTagTemplateFieldRequest()
    request.name = "name/value"
    # Patch the underlying gRPC stub and invoke the method.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplateField()
        client.rename_tag_template_field(request)
        # Exactly one RPC, carrying the original request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_rename_tag_template_field_field_headers_async():
    """Async variant: URI-bound fields must become routing headers."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.RenameTagTemplateFieldRequest()
    request.name = "name/value"
    # Patch the underlying gRPC stub with an awaitable canned reply.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.rename_tag_template_field(request)
        # The RPC must carry the original request unchanged.
        assert len(stub.mock_calls)
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_rename_tag_template_field_flattened():
    """Flattened kwargs must be assembled into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the underlying gRPC stub and hand back a canned response.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplateField()
        # Invoke with a truthy value for each flattened field.
        client.rename_tag_template_field(
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
        # Exactly one RPC, carrying a request assembled from the kwargs.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent = call_args[0]
        assert sent.name == "name_value"
        assert sent.new_tag_template_field_id == "new_tag_template_field_id_value"
def test_rename_tag_template_field_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.RenameTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        client.rename_tag_template_field(
            request,
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
@pytest.mark.asyncio
async def test_rename_tag_template_field_flattened_async():
    """Async flattened kwargs must be assembled into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A prior
        # plain `call.return_value = tags.TagTemplateField()` assignment
        # was dead code — it was immediately overwritten — and has been
        # removed; only the awaitable wrapper is needed on the async path.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rename_tag_template_field(
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].new_tag_template_field_id
        mock_val = "new_tag_template_field_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_rename_tag_template_field_flattened_error_async():
    """Async mix of request object and flattened kwargs must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.RenameTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        await client.rename_tag_template_field(
            request,
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [datacatalog.DeleteTagTemplateFieldRequest, dict,]
)
def test_delete_tag_template_field(request_type, transport: str = "grpc"):
    """Verify the sync client forwards the request; delete returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub; delete RPCs return google.protobuf.Empty,
    # surfaced to the caller as None.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = None
        response = client.delete_tag_template_field(request)
        # Exactly one RPC, carrying the default request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
    # Deletion has no payload.
    assert response is None
def test_delete_tag_template_field_empty_call():
    """Coverage failsafe: a bare call (no request, no kwargs) still works."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub so no real traffic is generated.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        client.delete_tag_template_field()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request proto should have been synthesized.
        assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_delete_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.DeleteTagTemplateFieldRequest,
):
    """Verify the async client forwards the request; delete returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub with an awaitable empty reply.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag_template_field(request)
        # The stub must have been invoked with the default request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
    # Deletion has no payload.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tag_template_field_async_from_dict():
    """Exercise the async path with a plain dict in place of the proto."""
    await test_delete_tag_template_field_async(request_type=dict)
def test_delete_tag_template_field_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    request.name = "name/value"
    # Patch the underlying gRPC stub and invoke the method.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = None
        client.delete_tag_template_field(request)
        # Exactly one RPC, carrying the original request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_template_field_field_headers_async():
    """Async variant: URI-bound fields must become routing headers."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    request.name = "name/value"
    # Patch the underlying gRPC stub with an awaitable empty reply.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag_template_field(request)
        # The RPC must carry the original request unchanged.
        assert len(stub.mock_calls)
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tag_template_field_flattened():
    """Flattened kwargs must be assembled into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the underlying gRPC stub; deletion has no payload.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = None
        # Invoke with a truthy value for each flattened field.
        client.delete_tag_template_field(
            name="name_value", force=True,
        )
        # Exactly one RPC, carrying a request assembled from the kwargs.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent = call_args[0]
        assert sent.name == "name_value"
        assert sent.force == True
def test_delete_tag_template_field_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        client.delete_tag_template_field(
            request, name="name_value", force=True,
        )
@pytest.mark.asyncio
async def test_delete_tag_template_field_flattened_async():
    """Async flattened kwargs must be assembled into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A prior
        # plain `call.return_value = None` assignment was dead code — it
        # was immediately overwritten — and has been removed; only the
        # awaitable wrapper is needed on the async path.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag_template_field(
            name="name_value", force=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].force
        mock_val = True
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_template_field_flattened_error_async():
    """Async mix of request object and flattened kwargs must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        await client.delete_tag_template_field(
            request, name="name_value", force=True,
        )
@pytest.mark.parametrize("request_type", [datacatalog.CreateTagRequest, dict,])
def test_create_tag(request_type, transport: str = "grpc"):
    """Verify the sync client forwards the request and parses the reply."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub and hand back a canned Tag.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        stub.return_value = tags.Tag(
            name="name_value",
            template="template_value",
            template_display_name="template_display_name_value",
            column="column_value",
        )
        response = client.create_tag(request)
        # Exactly one RPC, carrying the default request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagRequest()
    # The canned reply must round-trip into the expected response type.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
def test_create_tag_empty_call():
    """Coverage failsafe: a bare call (no request, no kwargs) still works."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub so no real traffic is generated.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        client.create_tag()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request proto should have been synthesized.
        assert call_args[0] == datacatalog.CreateTagRequest()
@pytest.mark.asyncio
async def test_create_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateTagRequest
):
    """Verify the async client forwards the request and parses the reply."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub with an awaitable canned Tag.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.Tag(
                name="name_value",
                template="template_value",
                template_display_name="template_display_name_value",
            )
        )
        response = await client.create_tag(request)
        # The stub must have been invoked with the default request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagRequest()
    # The canned reply must round-trip into the expected response type.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
@pytest.mark.asyncio
async def test_create_tag_async_from_dict():
    """Exercise the async path with a plain dict in place of the proto."""
    await test_create_tag_async(request_type=dict)
def test_create_tag_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.CreateTagRequest()
    request.parent = "parent/value"
    # Patch the underlying gRPC stub and invoke the method.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        stub.return_value = tags.Tag()
        client.create_tag(request)
        # Exactly one RPC, carrying the original request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tag_field_headers_async():
    """Async variant: URI-bound fields must become routing headers."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.CreateTagRequest()
    request.parent = "parent/value"
    # Patch the underlying gRPC stub with an awaitable canned reply.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        await client.create_tag(request)
        # The RPC must carry the original request unchanged.
        assert len(stub.mock_calls)
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tag_flattened():
    """Flattened kwargs must be assembled into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the underlying gRPC stub and hand back a canned Tag.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as stub:
        stub.return_value = tags.Tag()
        # Invoke with a truthy value for each flattened field.
        client.create_tag(
            parent="parent_value", tag=tags.Tag(name="name_value"),
        )
        # Exactly one RPC, carrying a request assembled from the kwargs.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent = call_args[0]
        assert sent.parent == "parent_value"
        assert sent.tag == tags.Tag(name="name_value")
def test_create_tag_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.CreateTagRequest()
    with pytest.raises(ValueError):
        client.create_tag(
            request,
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_flattened_async():
    """Async flattened kwargs must be assembled into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        # Designate an appropriate return value for the call.  (A prior
        # plain `call.return_value = tags.Tag()` assignment was dead code —
        # it was immediately overwritten — and has been removed; only the
        # awaitable wrapper is needed on the async path.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag(
            parent="parent_value", tag=tags.Tag(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_flattened_error_async():
    """Async mix of request object and flattened kwargs must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.CreateTagRequest()
    with pytest.raises(ValueError):
        await client.create_tag(
            request,
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [datacatalog.UpdateTagRequest, dict,])
def test_update_tag(request_type, transport: str = "grpc"):
    """Verify the sync client forwards the request and parses the reply."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub and hand back a canned Tag.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        stub.return_value = tags.Tag(
            name="name_value",
            template="template_value",
            template_display_name="template_display_name_value",
            column="column_value",
        )
        response = client.update_tag(request)
        # Exactly one RPC, carrying the default request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagRequest()
    # The canned reply must round-trip into the expected response type.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
def test_update_tag_empty_call():
    """Coverage failsafe: a bare call (no request, no kwargs) still works."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub so no real traffic is generated.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        client.update_tag()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request proto should have been synthesized.
        assert call_args[0] == datacatalog.UpdateTagRequest()
@pytest.mark.asyncio
async def test_update_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateTagRequest
):
    """Verify the async client forwards the request and parses the reply."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub with an awaitable canned Tag.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.Tag(
                name="name_value",
                template="template_value",
                template_display_name="template_display_name_value",
            )
        )
        response = await client.update_tag(request)
        # The stub must have been invoked with the default request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagRequest()
    # The canned reply must round-trip into the expected response type.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
@pytest.mark.asyncio
async def test_update_tag_async_from_dict():
    """Exercise the async path with a plain dict in place of the proto."""
    await test_update_tag_async(request_type=dict)
def test_update_tag_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; here the routed field is nested (tag.name).
    request = datacatalog.UpdateTagRequest()
    request.tag.name = "tag.name/value"
    # Patch the underlying gRPC stub and invoke the method.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        stub.return_value = tags.Tag()
        client.update_tag(request)
        # Exactly one RPC, carrying the original request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "tag.name=tag.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tag_field_headers_async():
    """Async variant: URI-bound fields must become routing headers."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; here the routed field is nested (tag.name).
    request = datacatalog.UpdateTagRequest()
    request.tag.name = "tag.name/value"
    # Patch the underlying gRPC stub with an awaitable canned reply.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        await client.update_tag(request)
        # The RPC must carry the original request unchanged.
        assert len(stub.mock_calls)
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "tag.name=tag.name/value",) in kw["metadata"]
def test_update_tag_flattened():
    """Flattened kwargs must be assembled into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the underlying gRPC stub and hand back a canned Tag.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as stub:
        stub.return_value = tags.Tag()
        # Invoke with a truthy value for each flattened field.
        client.update_tag(
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one RPC, carrying a request assembled from the kwargs.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent = call_args[0]
        assert sent.tag == tags.Tag(name="name_value")
        assert sent.update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_tag_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.UpdateTagRequest()
    with pytest.raises(ValueError):
        client.update_tag(
            request,
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_flattened_async():
    """Async flattened kwargs must be assembled into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        # Designate an appropriate return value for the call.  (A prior
        # plain `call.return_value = tags.Tag()` assignment was dead code —
        # it was immediately overwritten — and has been removed; only the
        # awaitable wrapper is needed on the async path.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tag(
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_flattened_error_async():
    """Async mix of request object and flattened kwargs must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous,
    # so the client must reject the call before any RPC is attempted.
    request = datacatalog.UpdateTagRequest()
    with pytest.raises(ValueError):
        await client.update_tag(
            request,
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [datacatalog.DeleteTagRequest, dict,])
def test_delete_tag(request_type, transport: str = "grpc"):
    """Verify the sync client forwards the request; delete returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub; delete RPCs have no payload.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as stub:
        stub.return_value = None
        response = client.delete_tag(request)
        # Exactly one RPC, carrying the default request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagRequest()
    # Deletion has no payload.
    assert response is None
def test_delete_tag_empty_call():
    """Coverage failsafe: a bare call (no request, no kwargs) still works."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the underlying gRPC stub so no real traffic is generated.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as stub:
        client.delete_tag()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request proto should have been synthesized.
        assert call_args[0] == datacatalog.DeleteTagRequest()
@pytest.mark.asyncio
async def test_delete_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteTagRequest
):
    """Verify the async client forwards the request; delete returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 treats every field as optional and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the underlying gRPC stub with an awaitable empty reply.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag(request)
        # The stub must have been invoked with the default request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagRequest()
    # Deletion has no payload.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tag_async_from_dict():
    """Exercise the async path with a plain dict in place of the proto."""
    await test_delete_tag_async(request_type=dict)
def test_delete_tag_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header, so give it a non-empty value.
    request = datacatalog.DeleteTagRequest()
    request.name = "name/value"
    # Patch the underlying gRPC stub and invoke the method.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as stub:
        stub.return_value = None
        client.delete_tag(request)
        # Exactly one RPC, carrying the original request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, kw = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must have been attached to the metadata.
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_field_headers_async():
    """DeleteTag (async): the request's ``name`` is propagated as an x-goog-request-params header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteTagRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tag_flattened():
    """DeleteTag: the flattened ``name`` kwarg is packed into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tag(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_tag_flattened_error():
    """DeleteTag: supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_tag(
            datacatalog.DeleteTagRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_tag_flattened_async():
    """DeleteTag (async): the flattened ``name`` kwarg is packed into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single assignment; the previous `call.return_value = None` was
        # dead code, immediately overwritten by the fake call below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_flattened_error_async():
    """DeleteTag (async): supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_tag(
            datacatalog.DeleteTagRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [datacatalog.ListTagsRequest, dict,])
def test_list_tags(request_type, transport: str = "grpc"):
    """ListTags: the stub is invoked once and the response is wrapped in a ListTagsPager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.ListTagsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_tags(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.ListTagsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTagsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_tags_empty_call():
    """ListTags: calling with no arguments still sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        client.list_tags()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.ListTagsRequest()
@pytest.mark.asyncio
async def test_list_tags_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListTagsRequest
):
    """ListTags (async): the stub is invoked and the response is wrapped in a ListTagsAsyncPager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_tags(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.ListTagsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTagsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tags_async_from_dict():
    """ListTags (async): a plain dict is accepted in place of a request proto."""
    await test_list_tags_async(request_type=dict)
def test_list_tags_field_headers():
    """ListTags: the request's ``parent`` is propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.ListTagsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        call.return_value = datacatalog.ListTagsResponse()
        client.list_tags(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tags_field_headers_async():
    """ListTags (async): the request's ``parent`` is propagated as an x-goog-request-params header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.ListTagsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse()
        )
        await client.list_tags(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tags_flattened():
    """ListTags: the flattened ``parent`` kwarg is packed into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.ListTagsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_tags(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_tags_flattened_error():
    """ListTags: supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_tags(
            datacatalog.ListTagsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_tags_flattened_async():
    """ListTags (async): the flattened ``parent`` kwarg is packed into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single assignment; the previous plain ListTagsResponse assignment
        # was dead code, immediately overwritten by the fake call below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_tags(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tags_flattened_error_async():
    """ListTags (async): supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_tags(
            datacatalog.ListTagsRequest(), parent="parent_value",
        )
def test_list_tags_pager(transport_name: str = "grpc"):
    """ListTags: the sync pager iterates across all pages and forwards routing metadata."""
    client = DataCatalogClient(
        # Instantiate the credentials; passing the bare class (as before) only
        # worked by accident and is inconsistent with every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[tags.Tag(), tags.Tag(), tags.Tag(),], next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(tags=[], next_page_token="def",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(),], next_page_token="ghi",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(), tags.Tag(),],),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_tags(request={})

        assert pager._metadata == metadata

        # Iterating the pager transparently walks all four pages (3+0+1+2 tags).
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, tags.Tag) for i in results)
def test_list_tags_pages(transport_name: str = "grpc"):
    """ListTags: the sync pager exposes raw pages with their page tokens."""
    client = DataCatalogClient(
        # Instantiate the credentials; passing the bare class (as before) only
        # worked by accident and is inconsistent with every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[tags.Tag(), tags.Tag(), tags.Tag(),], next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(tags=[], next_page_token="def",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(),], next_page_token="ghi",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(), tags.Tag(),],),
            RuntimeError,
        )
        pages = list(client.list_tags(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tags_async_pager():
    """ListTags (async): the async pager iterates across all pages."""
    client = DataCatalogAsyncClient(
        # Instantiate the credentials; passing the bare class (as before) only
        # worked by accident and is inconsistent with every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tags), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[tags.Tag(), tags.Tag(), tags.Tag(),], next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(tags=[], next_page_token="def",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(),], next_page_token="ghi",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(), tags.Tag(),],),
            RuntimeError,
        )
        async_pager = await client.list_tags(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        # All four pages are consumed transparently (3+0+1+2 tags).
        assert len(responses) == 6
        assert all(isinstance(i, tags.Tag) for i in responses)
@pytest.mark.asyncio
async def test_list_tags_async_pages():
    """ListTags (async): the async pager exposes raw pages with their page tokens."""
    client = DataCatalogAsyncClient(
        # Instantiate the credentials; passing the bare class (as before) only
        # worked by accident and is inconsistent with every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tags), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[tags.Tag(), tags.Tag(), tags.Tag(),], next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(tags=[], next_page_token="def",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(),], next_page_token="ghi",),
            datacatalog.ListTagsResponse(tags=[tags.Tag(), tags.Tag(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_tags(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
    """SetIamPolicy: the stub is invoked once and the Policy response is returned."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
    """SetIamPolicy: calling with no arguments still sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        client.set_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """SetIamPolicy (async): the stub is invoked and the Policy response is returned."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    """SetIamPolicy (async): a plain dict is accepted in place of a request proto."""
    await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
    """SetIamPolicy: the request's ``resource`` is propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """SetIamPolicy (async): the request's ``resource`` is propagated as an x-goog-request-params header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
    """SetIamPolicy: a dict request containing foreign (iam) message fields is accepted."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
def test_set_iam_policy_flattened():
    """SetIamPolicy: the flattened ``resource`` kwarg is packed into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
def test_set_iam_policy_flattened_error():
    """SetIamPolicy: supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """SetIamPolicy (async): the flattened ``resource`` kwarg is packed into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single assignment; the previous plain Policy assignment was dead
        # code, immediately overwritten by the fake call below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.set_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """SetIamPolicy (async): supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
def test_get_iam_policy(request_type, transport: str = "grpc"):
    """GetIamPolicy: the stub is invoked once and the Policy response is returned."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.get_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
    """GetIamPolicy: calling with no arguments still sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        client.get_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """GetIamPolicy (async): the stub is invoked and the Policy response is returned."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.get_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    """GetIamPolicy (async): a plain dict is accepted in place of a request proto."""
    await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
    """GetIamPolicy: the request's ``resource`` is propagated as an x-goog-request-params header."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """GetIamPolicy (async): the request's ``resource`` is propagated as an x-goog-request-params header."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict_foreign():
    """GetIamPolicy: a dict request containing foreign (iam) message fields is accepted."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
def test_get_iam_policy_flattened():
    """GetIamPolicy: the flattened ``resource`` kwarg is packed into the request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
def test_get_iam_policy_flattened_error():
    """GetIamPolicy: supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """GetIamPolicy (async): the flattened ``resource`` kwarg is packed into the request proto."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single assignment; the previous plain Policy assignment was dead
        # code, immediately overwritten by the fake call below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """GetIamPolicy (async): supplying both a request object and flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
    """TestIamPermissions: the stub is invoked once and the permissions response is returned."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
    """TestIamPermissions: calling with no arguments still sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        client.test_iam_permissions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """TestIamPermissions (async): the stub is invoked and the permissions response is returned."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    """TestIamPermissions (async): a plain dict is accepted in place of a request proto."""
    await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse()
)
await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
response = client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataCatalogClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DataCatalogClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DataCatalogClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataCatalogClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DataCatalogClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DataCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DataCatalogGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.DataCatalogGrpcTransport,)
def test_data_catalog_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DataCatalogTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_data_catalog_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.datacatalog_v1beta1.services.data_catalog.transports.DataCatalogTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DataCatalogTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"search_catalog",
"create_entry_group",
"update_entry_group",
"get_entry_group",
"delete_entry_group",
"list_entry_groups",
"create_entry",
"update_entry",
"delete_entry",
"get_entry",
"lookup_entry",
"list_entries",
"create_tag_template",
"get_tag_template",
"update_tag_template",
"delete_tag_template",
"create_tag_template_field",
"update_tag_template_field",
"rename_tag_template_field",
"delete_tag_template_field",
"create_tag",
"update_tag",
"delete_tag",
"list_tags",
"set_iam_policy",
"get_iam_policy",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_data_catalog_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.datacatalog_v1beta1.services.data_catalog.transports.DataCatalogTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DataCatalogTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_data_catalog_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.datacatalog_v1beta1.services.data_catalog.transports.DataCatalogTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DataCatalogTransport()
adc.assert_called_once()
def test_data_catalog_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DataCatalogClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport,],
)
def test_data_catalog_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DataCatalogGrpcTransport, grpc_helpers),
(transports.DataCatalogGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_data_catalog_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"datacatalog.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="datacatalog.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_data_catalog_host_no_port():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datacatalog.googleapis.com"
),
)
assert client.transport._host == "datacatalog.googleapis.com:443"
def test_data_catalog_host_with_port():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datacatalog.googleapis.com:8000"
),
)
assert client.transport._host == "datacatalog.googleapis.com:8000"
def test_data_catalog_grpc_transport_channel():
    """A caller-supplied channel must be used verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DataCatalogGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: identity comparison with None (PEP 8 / E711), not `== None`.
    assert transport._ssl_channel_credentials is None
def test_data_catalog_grpc_asyncio_transport_channel():
    """A caller-supplied channel must be used verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DataCatalogGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: identity comparison with None (PEP 8 / E711), not `== None`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entry_path():
project = "squid"
location = "clam"
entry_group = "whelk"
entry = "octopus"
expected = "projects/{project}/locations/{location}/entryGroups/{entry_group}/entries/{entry}".format(
project=project, location=location, entry_group=entry_group, entry=entry,
)
actual = DataCatalogClient.entry_path(project, location, entry_group, entry)
assert expected == actual
def test_parse_entry_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"entry_group": "cuttlefish",
"entry": "mussel",
}
path = DataCatalogClient.entry_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_entry_path(path)
assert expected == actual
def test_entry_group_path():
project = "winkle"
location = "nautilus"
entry_group = "scallop"
expected = "projects/{project}/locations/{location}/entryGroups/{entry_group}".format(
project=project, location=location, entry_group=entry_group,
)
actual = DataCatalogClient.entry_group_path(project, location, entry_group)
assert expected == actual
def test_parse_entry_group_path():
expected = {
"project": "abalone",
"location": "squid",
"entry_group": "clam",
}
path = DataCatalogClient.entry_group_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_entry_group_path(path)
assert expected == actual
def test_tag_path():
project = "whelk"
location = "octopus"
entry_group = "oyster"
entry = "nudibranch"
tag = "cuttlefish"
expected = "projects/{project}/locations/{location}/entryGroups/{entry_group}/entries/{entry}/tags/{tag}".format(
project=project,
location=location,
entry_group=entry_group,
entry=entry,
tag=tag,
)
actual = DataCatalogClient.tag_path(project, location, entry_group, entry, tag)
assert expected == actual
def test_parse_tag_path():
expected = {
"project": "mussel",
"location": "winkle",
"entry_group": "nautilus",
"entry": "scallop",
"tag": "abalone",
}
path = DataCatalogClient.tag_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_tag_path(path)
assert expected == actual
def test_tag_template_path():
project = "squid"
location = "clam"
tag_template = "whelk"
expected = "projects/{project}/locations/{location}/tagTemplates/{tag_template}".format(
project=project, location=location, tag_template=tag_template,
)
actual = DataCatalogClient.tag_template_path(project, location, tag_template)
assert expected == actual
def test_parse_tag_template_path():
expected = {
"project": "octopus",
"location": "oyster",
"tag_template": "nudibranch",
}
path = DataCatalogClient.tag_template_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_tag_template_path(path)
assert expected == actual
def test_tag_template_field_path():
project = "cuttlefish"
location = "mussel"
tag_template = "winkle"
field = "nautilus"
expected = "projects/{project}/locations/{location}/tagTemplates/{tag_template}/fields/{field}".format(
project=project, location=location, tag_template=tag_template, field=field,
)
actual = DataCatalogClient.tag_template_field_path(
project, location, tag_template, field
)
assert expected == actual
def test_parse_tag_template_field_path():
expected = {
"project": "scallop",
"location": "abalone",
"tag_template": "squid",
"field": "clam",
}
path = DataCatalogClient.tag_template_field_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_tag_template_field_path(path)
assert expected == actual
def test_common_billing_account_path():
    """common_billing_account_path builds a `billingAccounts/{id}` resource name."""
    billing_account = "whelk"
    expected = "billingAccounts/" + billing_account
    actual = DataCatalogClient.common_billing_account_path(billing_account)
    assert actual == expected
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path must invert common_billing_account_path."""
    expected = {"billing_account": "octopus"}
    # Round-trip: build the path, then parse it back into its components.
    path = DataCatalogClient.common_billing_account_path(**expected)
    assert DataCatalogClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = DataCatalogClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = DataCatalogClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = DataCatalogClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = DataCatalogClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = DataCatalogClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = DataCatalogClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DataCatalogClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = DataCatalogClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DataCatalogClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DataCatalogTransport, "_prep_wrapped_messages"
) as prep:
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DataCatalogTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DataCatalogClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
    """Exiting a client context manager must close the transport channel once.

    Fix: the local mapping was named ``transports``, shadowing the imported
    ``transports`` module used throughout this file; renamed to avoid the hazard.
    """
    close_attr_by_transport = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_attr_by_transport.items():
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # Entering the context must not close the channel yet.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager must delegate close() to its transport.

    Fix: the local list was named ``transports``, shadowing the imported
    ``transports`` module used throughout this file; renamed to avoid the hazard.
    """
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(DataCatalogClient, transports.DataCatalogGrpcTransport),
(DataCatalogAsyncClient, transports.DataCatalogGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-datacatalog | tests/unit/gapic/datacatalog_v1beta1/test_data_catalog.py | Python | apache-2.0 | 296,123 | [
"Octopus"
] | b7c1ea950232098b9e067aaff2465052fc909d1b9e6fb12f59e5f1cc7c9f99c7 |
#!/usr/bin/env python3
# Copyright (C) 2013,2014,2018-2019 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script writes the sample list of features to myconfig-sample.hpp
import fileinput
import inspect
import sys
import os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', 'src'))
import featuredefs
if len(sys.argv) != 2:
print("Usage: {} DEFFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename = sys.argv[1]
defs = featuredefs.defs(deffilename)
featuresdone = set()
for line in fileinput.input(deffilename):
line = line.strip()
# Handle empty and comment lines
if not line:
print()
continue
elif line.startswith('#'):
continue
elif line.startswith('//') or line.startswith('/*'):
print(line)
continue
# Tokenify line
feature = line.split(None, 1)[0]
if feature in defs.features and feature not in featuresdone:
print('//#define %s' % feature)
featuresdone.add(feature)
| KaiSzuttor/espresso | src/config/gen_sampleconfig.py | Python | gpl-3.0 | 1,753 | [
"ESPResSo"
] | faccec11a25c7578940527465a5b6ce15c36e2f830de869dd2d49d622fde70b8 |
from pyml.cluster.base import ClusterBase
from pyml.utils import set_seed
import warnings
class KMeans(ClusterBase):
    def __init__(self, k=3, initialisation='Forgy', max_iterations=100, min_change=1,
                 seed=None, norm='l1'):
        """KMeans clustering.

        :param k: number of clusters (int)
        :param initialisation: cluster initialisation method, 'Forgy' or 'Random'
        :param max_iterations: maximum number of update iterations
        :param min_change: minimum number of label reassignments per iteration
            required to keep iterating (stopping criterion)
        :param seed: random seed (None or int); passed to pyml.utils.set_seed
        :param norm: norm used for point-to-centroid distances; an int or
            'l1'/'l2' (e.g. 'l2' or 2 gives the Euclidean distance)
        :raises ValueError: if ``norm`` or ``initialisation`` is not recognised

        Example:
        --------
        >>> from pyml.cluster import KMeans
        >>> from pyml.datasets.random_data import gaussian
        >>> from pyml.preprocessing import train_test_split
        >>> datapoints, labels = gaussian(n=100, d=2, labels=3, sigma=0.1, seed=1970)
        >>> X_train, y_train, X_test, y_test = train_test_split(datapoints, labels, train_split=0.95, seed=1970)
        >>> kmeans = KMeans(k=3, max_iterations=1000, seed=1970)
        >>> _ = kmeans.train(X_train, y_train)
        >>> kmeans.iterations
        7
        >>> kmeans.centroids[1]
        [0.12801075816403754, 0.21926563270201577]
        """
        ClusterBase.__init__(self)
        # NOTE(review): any int is accepted as a norm here, including
        # non-positive values — confirm that ClusterBase handles those.
        if isinstance(norm, int) or norm in ['l1', 'l2']:
            self._norm = norm
        else:
            raise ValueError("Unknown norm.")
        self._seed = set_seed(seed)
        self._k = k
        self._max_iterations = max_iterations
        self._min_change = min_change
        if initialisation in ['Forgy', 'Random']:
            self._initialisation = initialisation
        else:
            raise ValueError("Unknown initialisation method.")

    def _train(self, X, y=None):
        """Run the KMeans algorithm on X.

        Algorithm:
        1. Initialise centroid coordinates.
        2. Assign a cluster label to each point of X.
        3. Recompute each centroid as the mean of its assigned points.
        4. Repeat 2-3 until max_iterations is reached or fewer than
           ``min_change`` labels changed in an iteration.

        :param X: list of N lists (each of size M) of data points
        :param y: unused (kept for API symmetry with supervised models)
        :return: self
        :raises ValueError: if there are fewer data points than clusters
        """
        self._X = X
        self._y = y
        self._n = len(X)
        if self.n < self.k:
            raise ValueError("Number of clusters should be lower than the number of data points, "
                             "instead got {} datapoints for {} clusters".format(self.n, self.k))
        # _initialise_centroids / _assign_cluster / _get_cluster / _changes are
        # presumably provided by ClusterBase — not visible here; verify there.
        self._initialise_centroids()
        self._dimensions = len(X[0])
        self._iterations = 0
        # NOTE(review): _cluster_assignment is initialised but never used
        # within this class — possibly dead state or used by the base class.
        self._cluster_assignment = []
        # Seed `change` with n so the loop condition passes on the first pass.
        change = self.n
        self._labels = self._assign_cluster(self._X)
        while self.iterations < self.max_iterations and self._min_change < change:
            self._old_indices = [self._get_cluster(i) for i in range(self.k)]
            self._update_centroids()
            self._labels = self._assign_cluster(self._X)
            self._iterations += 1
            change = self._changes()
        # If we left the loop with more changes than the threshold, we hit
        # max_iterations without converging.
        if change > self._min_change:
            warnings.warn("Failed to converge within {} iterations, consider increasing max_iterations".
                          format(self.max_iterations))

    def _predict(self, X):
        """Assign each point of X to the nearest trained centroid.

        :param X: list of N lists (each of size M) of data points
        :return: list of predicted cluster labels
        """
        return self._assign_cluster(X)

    def _update_centroids(self):
        """Recompute every centroid as the mean of its cluster's points.

        Mutates the ``_centroids`` attribute in place; returns None.
        """
        self._centroids = [self._get_cluster_mean(x) for x in range(self.k)]

    @property
    def k(self):
        """int: number of clusters."""
        return self._k

    @property
    def n(self):
        """int: number of training examples (set during training)."""
        return self._n

    @property
    def iterations(self):
        """int: iterations performed by the last ``train`` call."""
        return self._iterations

    @property
    def max_iterations(self):
        """int: maximum number of iterations allowed."""
        return self._max_iterations

    @property
    def min_change(self):
        """int: minimum label changes per iteration required to continue."""
        return self._min_change

    @property
    def seed(self):
        """int: random seed in use."""
        return self._seed

    @property
    def centroids(self):
        """list: list of centroid coordinate lists."""
        return self._centroids

    @property
    def norm(self):
        """int or str: norm used for distance calculations."""
        return self._norm
| gf712/PyML | pyml/cluster/kmeans.py | Python | mit | 6,082 | [
"Gaussian"
] | e44c3e8cb17cede9c3654c218801d4200f3af16971222eb16b2cd0cc6496bf66 |
# NOTE(review): this file is Python 2 (print statements, `del(t)`) — it
# predates the py3 migration of ASE; do not run under Python 3 as-is.
import os
from ase import Atom, Atoms
from ase.io import PickleTrajectory

# Build a CO molecule and write 5 frames, shifting it along z each time.
co = Atoms([Atom('C', (0, 0, 0)),
            Atom('O', (0, 0, 1.2))])
traj = PickleTrajectory('1.traj', 'w', co)
for i in range(5):
    co.positions[:, 2] += 0.1
    traj.write()
del traj

# Re-open in append mode, read the last frame back, and append a shifted copy.
traj = PickleTrajectory('1.traj', 'a')
co = traj[-1]
print co.positions
co.positions[:] += 1
traj.write(co)
del traj

# Append again and iterate all frames to check they read back in order.
t = PickleTrajectory('1.traj', 'a')
print t[-1].positions
print '.--------'
for a in t:
    print 1, a.positions[-1,2]
co.positions[:] += 1
t.write(co)
for a in t:
    print 2, a.positions[-1,2]
assert len(t) == 7

# Changing the atomic numbers must make write() raise ValueError.
co[0].number = 1
try:
    t.write(co)
except ValueError:
    pass
else:
    assert False
co[0].number = 6

# Changing periodic boundary conditions must also be rejected.
co.pbc = True
try:
    t.write(co)
except ValueError:
    pass
else:
    assert False
co.pbc = False

# Changing the number of atoms must also be rejected.
o = co.pop(1)
try:
    t.write(co)
except ValueError:
    pass
else:
    assert False
co.append(o)
t.write(co)

# append to a nonexisting file
fname = '2.traj'
if os.path.isfile(fname):
    os.remove(fname)
t = PickleTrajectory(fname, 'a', co)
del(t)
os.remove(fname)
| slabanja/ase | ase/test/trajectory.py | Python | gpl-2.0 | 1,075 | [
"ASE"
] | ff0063bd6714448cbaa1e4719030891c41b6f69742128a74f70211251b65b567 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Remove cullers so single vertex will render
ren1 = vtk.vtkRenderer()
ren1.GetCullers().RemoveAllItems()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Shared scratch objects exercised by the cell-API smoke calls below.
cell = vtk.vtkGenericCell()
ptIds = vtk.vtkIdList()


def _add_structured_grid_actor(dims, coords, position):
    """Build a structured grid of the given dimensions/points, exercise its
    cell API, and add a cube-glyph-free actor for it to ren1 at *position*.

    dims     -- (nx, ny, nz) structured dimensions
    coords   -- list of (x, y, z) point coordinates (len == nx*ny*nz)
    position -- world-space position for the actor
    Returns the vtkActor (the actor keeps the whole pipeline alive).
    """
    pts = vtk.vtkPoints()
    pts.SetNumberOfPoints(len(coords))
    for i, (x, y, z) in enumerate(coords):
        pts.SetPoint(i, x, y, z)
    grid = vtk.vtkStructuredGrid()
    grid.SetDimensions(*dims)
    grid.SetPoints(pts)
    # Smoke-test the cell accessors; results are intentionally unused.
    grid.GetCell(0)
    grid.GetCell(0, cell)
    grid.GetCellPoints(0, ptIds)
    geom = vtk.vtkStructuredGridGeometryFilter()
    geom.SetInputData(grid)
    geom.SetExtent(0, 2, 0, 2, 0, 2)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(geom.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.SetPosition(*position)
    ren1.AddActor(actor)
    return actor


# One grid per dimensionality: 0D point, 1D lines along each axis,
# 2D faces in each plane, and a full 3D cell.
ZeroDActor = _add_structured_grid_actor((1, 1, 1), [(0, 0, 0)], (0, 0, 0))
XActor = _add_structured_grid_actor((2, 1, 1),
                                    [(0, 0, 0), (1, 0, 0)], (2, 0, 0))
YActor = _add_structured_grid_actor((1, 2, 1),
                                    [(0, 0, 0), (0, 1, 0)], (4, 0, 0))
ZActor = _add_structured_grid_actor((1, 1, 2),
                                    [(0, 0, 0), (0, 0, 1)], (6, 0, 0))
XYActor = _add_structured_grid_actor(
    (2, 2, 1),
    [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0)], (0, 2, 0))
YZActor = _add_structured_grid_actor(
    (1, 2, 2),
    [(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 1, 1)], (2, 2, 0))
XZActor = _add_structured_grid_actor(
    (2, 1, 2),
    [(0, 0, 0), (1, 0, 0), (0, 0, 1), (1, 0, 1)], (4, 2, 0))
XYZActor = _add_structured_grid_actor(
    (2, 2, 2),
    [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0),
     (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1)], (6, 2, 0))

# render the image
#
renWin.SetSize(300, 150)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(2.27407, 14.9819)
cam1.SetFocalPoint(3.1957, 1.74012, 0.176603)
cam1.SetPosition(-0.380779, 6.13894, 5.59404)
cam1.SetViewUp(0.137568, 0.811424, -0.568037)
renWin.Render()
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Common/DataModel/Testing/Python/TestStructuredGrid.py | Python | gpl-3.0 | 5,545 | [
"VTK"
] | 4f20940f0c79be310040e4b97038a8475bec3e19a7c327a0ae8091a08fa75183 |
# Andrew Miller <amiller@cs.ucf.edu> 2011
#
# BlockPlayer - 3D model reconstruction using the Lattice-First algorithm
# See:
# "Interactive 3D Model Acquisition and Tracking of Building Block Structures"
# Andrew Miller, Brandyn White, Emiko Charbonneau, Zach Kanzler, and Joseph J. LaViola Jr.
# IEEE VR 2012, IEEE TVGC 2012
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import numpy as np
import preprocess
import opencl
import lattice
import config
import os
import dataset
import stencil
from ctypes import POINTER as PTR, c_byte, c_size_t, c_float
# Load the native speedup library that lives next to this module and declare
# the argument types of its histogram() entry point for safe ctypes calls.
speedup_ctypes = np.ctypeslib.load_library('speedup_ctypes.so',
                                           os.path.dirname(__file__))
speedup_ctypes.histogram.argtypes = [PTR(c_byte), PTR(c_float), PTR(c_float),
                                     c_size_t, c_size_t, c_size_t, c_size_t]
def initialize():
    """Reset the module-level reconstruction state to an empty grid."""
    # Grid extent in lattice units, taken from the configured bounding box.
    b_width = [config.bounds[1][i]-config.bounds[0][i]
               for i in range(3)]
    global occ, vac, color, color_count, previous_estimate, good_alignment
    occ = np.zeros(b_width)>0    # occupied voxels (all False to start)
    vac = np.zeros(b_width)>0    # carved/vacant voxels
    color = np.zeros((b_width[0], b_width[1], b_width[2], 3),'u1')
    color_count = np.zeros(b_width,'i')
    good_alignment = False
    previous_estimate = None

# NOTE(review): indentation reconstructed -- this guard appears to define the
# module globals only on first import so that a reload() keeps accumulated
# state; confirm against the original file layout.
if not 'previous_estimate' in globals():
    previous_estimate=occ=vac=occ_stencil=vac_stencil=color=color_count=None
    good_alignment=None
    initialize()
def gt2grid(gtstr, chars='*rR'):
    """Parse a ground-truth layer-string into a boolean occupancy grid.

    gtstr is the repr of a list of layers, each layer a list of row strings
    (as produced by grid2gt).  A cell is True when its character is one of
    *chars*.  Returns a contiguous boolean array indexed (row, layer, column).

    Fix: the original used np.array(map(map(...))), which only works under
    Python 2 where map returns lists; comprehensions work on both 2 and 3.
    """
    # eval() of the layer-list literal -- trusted input only (ground truth).
    layers = eval(gtstr)
    # Split each row string into a tuple of single characters so numpy
    # builds a 3-d character array shaped (layer, row, column).
    g = np.array([[tuple(row) for row in layer] for layer in layers])
    # Put rows first: (row, layer, column).
    g = np.rollaxis(g, 1)
    res = (g == chars[0])
    for c in chars[1:]:
        res = np.logical_or(res, g == c)
    return np.ascontiguousarray(res)
def grid2gt(occ):
    """Render a boolean occupancy grid as a pretty-printed list of layers.

    Each layer (second axis) becomes a list of row strings in which occupied
    cells are '*' and empty cells are ' ' -- the inverse of gt2grid.
    """
    import pprint
    symbols = np.where(occ, '*', ' ')
    layers = [[''.join(row) for row in symbols[:, layer, :]]
              for layer in range(symbols.shape[1])]
    return pprint.pformat(layers)
def initialize_with_groundtruth(GT):
    """Reset the module state and seed occ/vac from a ground-truth grid GT."""
    initialize()
    global occ, vac
    occ[:,:] = GT
    # Everything not occupied in the ground truth is marked as carved.
    vac[:,:] = ~GT
def window_correction(occA, vacA, occB, vacB):
    """ Finds the translation/rotation components that align B with A,
    minimizing an objective function between them. The objective function
    rewards occA and occB cells that agree, and penalizes ones that disagree.

    Weighted contributions:
        occA && occB: -p/4
        occA && vacB or vacA && occB: +p
        other: 0

    Params:
        occA, vacA: boolean 3-d array (the old one)
        occB, vacB: boolean 3-d array (the new one)

    Returns:
        (x,_,z),err:
            x,_,z are corrective translations from B to A, such that
                B = np.roll(B,x,0)
                B = np.roll(B,z,2)
            would modify B so that it lines up optimally with A.
            err is the value of the objective function minimized by (x,_,z)
    """
    # Precompute all four 90-degree rotations of B about the y axis
    # (rotation in the x-z plane via the swapaxes/rot90/swapaxes sandwich).
    occR = dict([(r, np.swapaxes(np.rot90(np.swapaxes(occB,1,2), r), 1,2))
                 for r in (0,1,2,3)])
    vacR = dict([(r, np.swapaxes(np.rot90(np.swapaxes(vacB,1,2), r), 1,2))
                 for r in (0,1,2,3)])
    #print [occ.shape for occ in occR.values()]

    def error(t):
        # Objective value for candidate t = (x, y, z, rotation).
        occB = occR[t[3]]
        vacB = vacR[t[3]]
        nv = np.roll(occB, t[0], 0)
        nv = np.roll(nv, t[2], 2)
        nc = np.roll(vacB, t[0], 0)
        nc = np.roll(nc, t[2], 2)
        # Disagreements cost +1 each; agreements earn -1/4 each.
        return np.sum(np.minimum(nc,occA) + np.minimum(nv,vacA) -
                      np.minimum(nv,occA)/4.)

    # Candidate search window: shifts of up to 2 cells in x and z only
    # (y fixed, rotation fixed at 0 here).
    t = [(x,y,z,r)
         for x in [0,-1,1,-2,2]
         for y in [0]
         for z in [0,-1,1,-2,2]
         for r in [0]]
    vals = [error(_) for _ in t]
    #print vals
    return t[np.argmin(vals)], np.min(vals)
def xcorr_correction(A, B):
    """
    Find the best fit parameters between two voxel grids (e.g. a ground truth
    and an output) using convolution

    Params:
        A: boolean grid of output
        B: boolean grid of ground truth
    """
    def convolve(p1, p2):
        # Cross-correlate 2-d projections; flipping p2 turns fftconvolve
        # into a correlation.
        import scipy.signal
        #cc = scipy.signal.correlate2d(p1[1],p2[1],'same')
        cc = scipy.signal.fftconvolve(p1,p2[::-1,::-1])
        return cc

    def best(cc):
        # Peak of the correlation surface -> candidate (x, z) shift.
        # NOTE(review): cc.shape[0]/2 is integer division under Python 2;
        # under Python 3 this would yield floats -- confirm intended runtime.
        ind = np.argmax(cc)
        x,z = np.unravel_index(ind, cc.shape)
        x -= cc.shape[0]/2
        z -= cc.shape[0]/2
        return x,z,cc.max()

    # Try all four rotations
    global convs
    # Project each grid along each axis; only the y projection (index 1)
    # is correlated, since rotation/translation happen in the x-z plane.
    sA = [A.sum(i) for i in range(3)]
    sB = [B.sum(i) for i in range(3)]
    convs = [convolve(sA[1], np.rot90(sB[1], r)) for r in range(4)]
    cc = [best(_) for _ in convs]
    r = np.argmax([_[2] for _ in cc])
    x,z,_ = cc[r]
    B = apply_correction(B, x, z, r)
    # Refine with the small-window search, then score the residual mismatch.
    (bx,_,bz,br),e = window_correction(A,A&0,B,~B)
    B = apply_correction(B, bx, bz, br)
    err = float((A&~B).sum()+(B&~A).sum()) / B.sum()
    return A, B, err, (x,z,r), (bx,bz,br)
def show_votegrid(vg, color=(1,0,0), opacity=1):
    """Visualize a vote grid as cubes with mayavi; positive cells are drawn.

    color=None colors cubes by their vote value instead of a fixed color.
    """
    from enthought.mayavi import mlab
    mlab.clf()
    x,y,z = np.nonzero(vg>0)
    if not color is None:
        mlab.points3d(x,y,z,
                      opacity=opacity,
                      color=color,
                      scale_factor=1, scale_mode='none', mode='cube')
    else:
        # Scalar-colored variant: the vote values drive the colormap.
        mlab.points3d(x,y,z,vg[vg>0],
                      opacity=opacity,
                      scale_factor=1, scale_mode='none', mode='cube')
    gridmin, gridmax = config.bounds
    X,Y,Z = np.array(gridmax)-gridmin
    #mlab.axes(extent=[0,0,0,X,Y,Z])
    mlab.draw()
def has_previous_estimate():
    """Return True once a previous-frame estimate has been stored.

    Fix: use the idiomatic `is not None` instead of `not ... is None`.
    """
    global previous_estimate
    return previous_estimate is not None
def apply_correction(grid, bx, bz, rot):
    """Rotate *grid* by rot*90 degrees in the x-z plane, then translate it
    by bx cells along axis 0 and bz cells along axis 2 (with wraparound)."""
    rotated = np.swapaxes(np.rot90(np.swapaxes(grid, 1, 2), rot), 1, 2)
    shifted = np.roll(rotated, bx, axis=0)
    return np.roll(shifted, bz, axis=2)
def center(R_aligned, occ_new, vac_new):
    """Translate the grids so the occupied mass is centered at GRIDRAD,
    and shift the pose matrix by the same amount in world units."""
    # Lattice offset that moves the occupancy centroid to the grid center.
    bx,_,bz = [config.GRIDRAD-int(np.round(_.mean()))
               for _ in occ_new.nonzero()]
    occ_new = apply_correction(occ_new, bx, bz, 0)
    vac_new = apply_correction(vac_new, bx, bz, 0)
    R_correct = R_aligned.copy()
    # Apply the equivalent world-space translation (LW = lattice width).
    R_correct[0,3] += bx*config.LW
    R_correct[2,3] += bz*config.LW
    return R_correct, occ_new, vac_new
def nearest(R_previous, R_aligned):
    """Snap R_aligned to the 90-degree rotation and lattice translation
    closest to R_previous; returns (R_correct, (bx, bz, rot))."""
    # Find the nearest rotation
    import expmap
    # The four y-axis rotations by -i*90 degrees.
    M = [expmap.axis2rot(np.array([0,-i*np.pi/2,0])) for i in range(4)]
    rs = [np.dot(m, R_aligned[:3,:3]) for m in M]
    global dots
    # Pick the rotation whose first row best matches the previous pose.
    dots = [np.dot(r[0,:3], R_previous[0,:3]) for r in rs]
    rot = np.argmax(dots)
    R_correct = R_aligned.copy()
    R_correct[:3,:3] = rs[rot]
    R_correct[:3,3] = np.dot(M[rot], R_correct[:3,3])

    # Find the nearest translation
    # Round the pose difference to whole lattice cells (LW = lattice width).
    bx = int(np.round((R_previous[0,3]-R_correct[0,3])/config.LW))
    bz = int(np.round((R_previous[2,3]-R_correct[2,3])/config.LW))
    R_correct[0,3] += config.LW * bx
    R_correct[2,3] += config.LW * bz
    return R_correct, (bx,bz,rot)
def align_with_previous(R_aligned, occ_new, vac_new):
    """Snap the new pose and grids onto the previous frame's estimate,
    then fine-align with a small window search."""
    assert R_aligned.dtype == np.float32
    assert R_aligned.shape == (4,4)
    global previous_estimate
    occ = previous_estimate['occ']
    vac = previous_estimate['vac']
    R_previous = previous_estimate['R_correct']
    global R_correct
    # Coarse snap: nearest 90-degree rotation and lattice translation.
    R_correct, c = nearest(R_previous, R_aligned)
    occ_new = apply_correction(occ_new, *c)
    vac_new = apply_correction(vac_new, *c)
    # Fine alignment: search small lattice shifts for the best overlap.
    (bx,_,bz,rot),err = window_correction(occ, vac, occ_new, vac_new)
    #print 'err: ', err
    # NOTE(review): indentation reconstructed -- the block below is assumed
    # to run only when a nonzero slip was detected; confirm against original.
    if (bx,bz) != (0,0):
        R_correct[0,3] += bx*config.LW
        R_correct[2,3] += bz*config.LW
        occ_new = apply_correction(occ_new, bx, bz, rot)
        vac_new = apply_correction(vac_new, bx, bz, rot)
        print 'slip: %d %d %d' % (bx, bz, rot)
    return R_correct, occ_new, vac_new
def stencil_carve(depth, rect, R_correct, occ, vac, rgb=None):
    """Project the candidate voxels into the depth image and classify each
    as occupied or vacant from the per-voxel pixel evidence."""
    global previous_estimate
    # Candidates include last frame's occupied cells so blocks can persist.
    if not previous_estimate is None:
        occ_old = previous_estimate['occ']
        cands = occ_old | occ
    else:
        cands = occ
    global b_occ, b_vac, b_total
    b_occ, b_vac, b_total = stencil.stencil_carve(depth, R_correct,
                                                  cands, rgb, rect)
    global occ_stencil, vac_stencil
    # A voxel counts as occupied/vacant only with enough supporting pixels
    # (>30) and a high enough evidence ratio (0.9 / 0.6).
    occ_stencil = (b_occ/(b_total+1.)>0.9) & (b_total>30)
    vac_stencil = (b_vac/(b_total+1.)>0.6) & (b_total>30)
    global good_alignment
    # Despite the name this is a ratio in [0,1], not a boolean.
    good_alignment = float(b_occ.sum())/b_total.sum()
    return occ_stencil, vac_stencil
def merge_with_previous(occ_, vac_, occ_stencil, vac_stencil, color_=None):
    """Fold the new frame's carve results into the accumulated occ/vac/color
    state (indentation reconstructed -- verify branch bodies vs original)."""
    # Only allow 'uncarving' of elements attached to known blocks
    import scipy.ndimage
    global occ, vac, color, color_count
    cmask = scipy.ndimage.binary_dilation(occ)
    vac |= vac_
    # A voxel confirmed occupied this frame can no longer be vacant.
    vac[occ_stencil] = 0
    if occ.sum() > 0:
        occ_ &= cmask
    occ_ &= occ_stencil
    if not color_ is None:
        # Update colors only where this frame saw the voxel more often
        # than any previous frame did.
        colormask = occ_stencil&(stencil.b_occ>color_count)
        color[colormask,:] = color_[colormask,:]
        color_count[colormask] = stencil.b_occ[colormask]
        #print np.sum(colormask)
    occ |= occ_
    occ[vac] = 0
    color_count[~occ] = 0
def update_previous_estimate(R_correct):
    """Snapshot the current module state (grids, colors, pose) so the next
    frame can align against it."""
    global previous_estimate
    previous_estimate = {
        'vac': vac,
        'occ': occ,
        'R_correct': R_correct,
        'color': color,
        'color_count': color_count,
    }
| amiller/blockplayer | blockplayer/grid.py | Python | mpl-2.0 | 9,344 | [
"Mayavi"
] | 68e1f5d23ea86fe8b96f88e2327bcc41bab4d773b6f3c8572fa45906b1326bc8 |
"""
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 2001. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
print(__doc__)
def load_mauna_loa_atmospheric_co2():
    """Fetch the Mauna Loa CO2 dataset from OpenML and average duplicate
    monthly readings.

    Returns (months, avg_ppmvs): a (n, 1) array of fractional years and the
    matching 1-d array of mean CO2 concentrations in ppmv.
    """
    ml_data = fetch_openml(data_id=41187, as_frame=False)
    years = ml_data.data[:, 0]
    month_numbers = ml_data.data[:, 1]
    # Fractional year, e.g. 1958.25 for April 1958.
    month_float = years + (month_numbers - 1) / 12
    months = []
    ppmv_sums = []
    counts = []
    for month, ppmv in zip(month_float, ml_data.target):
        if months and month == months[-1]:
            # aggregate monthly sum to produce average
            ppmv_sums[-1] += ppmv
            counts[-1] += 1
        else:
            months.append(month)
            ppmv_sums.append(ppmv)
            counts.append(1)
    avg_ppmvs = np.asarray(ppmv_sums) / counts
    return np.asarray(months).reshape(-1, 1), avg_ppmvs
X, y = load_mauna_loa_atmospheric_co2()

# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0)  # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
    * ExpSineSquared(length_scale=1.3, periodicity=1.0)  # seasonal component
# medium term irregularity
k3 = 0.66**2 \
    * RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
    + WhiteKernel(noise_level=0.19**2)  # noise terms
kernel_gpml = k1 + k2 + k3 + k4

# optimizer=None keeps the book's hyperparameters fixed, so the reported
# log-marginal-likelihood is for the GPML values themselves.
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
                              optimizer=None, normalize_y=True)
gp.fit(X, y)

print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0)  # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
    * ExpSineSquared(length_scale=1.0, periodicity=1.0,
                     periodicity_bounds="fixed")  # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
    + WhiteKernel(noise_level=0.1**2,
                  noise_level_bounds=(1e-5, np.inf))  # noise terms
kernel = k1 + k2 + k3 + k4

# This time the hyperparameters are optimized by maximizing the
# log-marginal-likelihood (default optimizer).
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
                              normalize_y=True)
gp.fit(X, y)

print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Extrapolate 30 years beyond the data to show the widening uncertainty.
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)

# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
                 alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| glemaitre/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | Python | bsd-3-clause | 6,373 | [
"Gaussian"
] | 434fb758ab358afed3acfe0697cfa89f6c00bdf750c7a718e3ba7ecadd10ce4b |
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='verilator',
        output_format='regex',
        use_stderr=True,
        # verilator reports issues on stderr, e.g. "%Error: file.v:3: msg";
        # capture severity, line number, and message.
        output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
                     r'.+?:(?P<line>.+?): (?P<message>.+)')
class VerilogLintBear:
    """
    Analyze Verilog code using ``verilator`` and checks for all lint
    related and code style related warning messages. It supports the
    synthesis subset of Verilog, plus initial statements, proper
    blocking/non-blocking assignments, functions, tasks.

    It also warns about unused code when a specified signal is never sinked,
    and unoptimized code due to some construct, with which the
    optimization of the specified signal or block is disabled.

    This is done using the ``--lint-only`` command. For more information visit
    <http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
    """
    LANGUAGES = {'Verilog'}
    REQUIREMENTS = {DistributionRequirement(apt_get='verilator')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/45275'
    CAN_DETECT = {'Formatting', 'Code Simplification', 'Syntax', 'Unused Code'}

    @staticmethod
    def create_arguments(filename, file, config_file):
        # --lint-only: parse and lint without generating any code.
        return '--lint-only', filename
| gs0510/coala-bears | bears/verilog/VerilogLintBear.py | Python | agpl-3.0 | 1,483 | [
"VisIt"
] | 0006f720ed89ba5d97fcbe4a766be75817cb3acfd5c5b9bc4c16808cdf3b3878 |
#!/usr/bin/env python
"""\
py.cleanup [PATH] ...
Delete typical python development related files recursively under the specified PATH (which defaults to the current working directory). Don't follow links and don't recurse into directories with a dot. Optionally remove setup.py related files and empty
directories.
"""
import py
import sys, subprocess
def main():
    """Parse the py.cleanup command line and run the cleanup."""
    parser = py.std.optparse.OptionParser(usage=__doc__)
    parser.add_option("-e", metavar="ENDING",
        dest="endings", default=[".pyc", "$py.class"], action="append",
        help=("(multi) recursively remove files with the given ending."
             " '.pyc' and '$py.class' are in the default list."))
    parser.add_option("-d", action="store_true", dest="removedir",
                      help="remove empty directories.")
    parser.add_option("-p", action="store_true", dest="pycache",
                      help="remove __pycache__ directories.")
    parser.add_option("-s", action="store_true", dest="setup",
                      help="remove 'build' and 'dist' directories next to setup.py files")
    # Fix: the help text previously said '-S', but the setup-cleanup option
    # defined above (and enabled by Cleanup.__init__ for 'all') is '-s'.
    parser.add_option("-a", action="store_true", dest="all",
                      help="synonym for '-p -s -d -e pip-log.txt'")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
                      help="don't print each removed filename on output")
    parser.add_option("-n", "--dryrun", dest="dryrun", default=False,
        action="store_true",
        help="don't actually delete but display would-be-removed filenames.")
    (options, args) = parser.parse_args()
    Cleanup(options, args).main()
class Cleanup:
    """Recursively delete development debris under the configured paths."""

    def __init__(self, options, args):
        # Default to the current directory when no paths were given.
        if not args:
            args = ["."]
        self.options = options
        self.args = [py.path.local(x) for x in args]
        # '-a' expands to setup-clean, empty-dir removal, __pycache__
        # removal, and the pip-log.txt ending.
        if options.all:
            options.setup = True
            options.removedir = True
            options.pycache = True
            options.endings.append("pip-log.txt")

    def main(self):
        # Remove build/dist trees first so the generic walk below does not
        # descend into them needlessly.
        if self.options.setup:
            for arg in self.args:
                self.setupclean(arg)
        for path in self.args:
            py.builtin.print_("cleaning path", path,
                "of extensions", self.options.endings)
            for x in path.visit(self.shouldremove, self.recursedir):
                self.remove(x)
        # NOTE(review): 'path' here is the loop variable left over from the
        # loop above, so only the *last* argument is scanned for empty
        # directories -- possibly unintended; confirm before relying on it.
        if self.options.removedir:
            for x in path.visit(lambda x: x.check(dir=1), self.recursedir):
                if not x.listdir():
                    self.remove(x)

    def shouldremove(self, p):
        # Files: match against the configured endings.
        if p.check(file=1):
            for ending in self.options.endings:
                if p.basename.endswith(ending):
                    return True
            return False
        # Directories: only __pycache__, and only when requested.
        if self.options.pycache and p.basename == "__pycache__":
            return True

    def recursedir(self, path):
        # Don't follow symlinks and don't descend into dot-directories.
        return path.check(dotfile=0, link=0)

    def remove(self, path):
        # Path may already be gone (e.g. removed with its parent).
        if not path.check():
            return
        if self.options.dryrun:
            py.builtin.print_("would remove", path)
        else:
            if not self.options.quiet:
                py.builtin.print_("removing", path)
            path.remove()

    def XXXcallsetup(self, setup, *args):
        # NOTE(review): 'XXX'-prefixed and not called anywhere in view --
        # appears to be disabled code kept for reference.
        old = setup.dirpath().chdir()
        try:
            subprocess.call([sys.executable, str(setup)] + list(args))
        finally:
            old.chdir()

    def setupclean(self, path):
        # For every setup.py found, drop its sibling build/ and dist/ trees.
        for x in path.visit("setup.py", self.recursedir):
            basepath = x.dirpath()
            self.remove(basepath / "build")
            self.remove(basepath / "dist")
| blindroot/pycmd | pycmd/pycleanup.py | Python | mit | 3,617 | [
"VisIt"
] | e7d41b8190179ae1b0e1f533c65d8c74dfdb54b13ac907e8a5bd74fc9f66ed6d |
#!/usr/bin/env python
#
# Copyright (C) 2011-2012 ABINIT Group (Yann Pouillon)
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
from time import gmtime,strftime
import commands
import os
import re
import sys
# ---------------------------------------------------------------------------- #
#
# Main program
#
# Check if we are in the top of the ABINIT source tree
if ( not os.path.exists("configure.ac") or
not os.path.exists("src/98_main/abinit.F90") ):
print "%s: You must be in the top of an ABINIT source tree." % my_name
print "%s: Aborting now." % my_name
sys.exit(1)
# Init
nerr = 0
bex_diffs = list()
bex_missing = list()
bex_dir = "doc/config/build-examples"
ref_dir = "tests/buildsys/Refs"
# Check files
ref_list = os.listdir(ref_dir)
ref_list.sort()
for ref_file in ref_list:
if ( os.path.exists("%s/%s" % (bex_dir,ref_file)) ):
(ret,tmp) = commands.getstatusoutput("diff -q %s/%s %s/%s" % \
(ref_dir,ref_file,bex_dir,ref_file))
if ( ret != 0 ):
bex_diffs.append(ref_file)
else:
bex_missing.append(ref_file)
nerr = len(bex_diffs) + len(bex_missing)
# Report any mismatch
if ( nerr > 0 ):
sys.stderr.write("%s: reporting wrongly generated build examples\n\n" % \
(os.path.basename(sys.argv[0])))
sys.stderr.write("X: D=Difference detected / M=Missing File\n\n")
sys.stderr.write("%s %-64s\n" % ("X","File"))
sys.stderr.write("%s %s\n" % ("-","-" * 64))
for bex in bex_diffs:
sys.stderr.write("%s %-64s\n" % ("D",bex))
for bex in bex_missing:
sys.stderr.write("%s %-64s\n" % ("M",bex))
sys.stderr.write("\n")
if ( len(bex_missing) > 0 ):
sys.exit(1)
else:
sys.exit(0)
| qsnake/abinit | util/maintainers/check-build-refs.py | Python | gpl-3.0 | 1,788 | [
"ABINIT"
] | 8ec0513bfa0c5a36825a69e837fa8fbcd017965f00dd430b9b3f44e93ea8f75b |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (trueList, falseList): items satisfying *predicate* first,
    each list preserving the input order.
    """
    trues: List[Any] = []
    falses: List[Any] = []
    for item in iterator:
        (trues if predicate(item) else falses).append(item)
    return trues, falses
class spanner_admin_instanceCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites flattened positional/keyword API
    arguments into a single 'request' dict argument."""
    # Control-plane parameters that stay as real keyword arguments.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Positional-parameter order for every flattened API method.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'create_instance': ('parent', 'instance_id', 'instance', ),
        'delete_instance': ('name', ),
        'get_iam_policy': ('resource', 'options', ),
        'get_instance': ('name', 'field_mask', ),
        'get_instance_config': ('name', ),
        'list_instance_configs': ('parent', 'page_size', 'page_token', ),
        'list_instances': ('parent', 'page_size', 'page_token', 'filter', ),
        'set_iam_policy': ('resource', 'policy', ),
        'test_iam_permissions': ('resource', 'permissions', ),
        'update_instance': ('instance', 'field_mask', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        # Rewrite `client.method(a, b, retry=r)` into
        # `client.method(request={'x': a, 'y': b}, retry=r)`.
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    # NOTE(review): single transformer instance shared across all files via a
    # mutable-ish default argument; the transformer is stateless here, but
    # confirm before adding state to it.
    transformer=spanner_admin_instanceCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Lazily yield every .py file anywhere below in_dir.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    # Command-line entry point: validate both directories, then run fix_files.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the spanner_admin_instance client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Input must exist; output must exist and be empty so nothing is clobbered.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    fix_files(input_dir, output_dir)
| googleapis/python-spanner | scripts/fixup_spanner_admin_instance_v1_keywords.py | Python | apache-2.0 | 6,522 | [
"VisIt"
] | 0bdbb8ac2427cc89c9325eb17cef90fd23c6847f55ec6c2f731454f38f64b668 |
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
from unittest import TestCase
import macaroonbakery.httpbakery as httpbakery
import requests
from mock import patch
from httmock import HTTMock, response, urlmatch
ID_PATH = 'http://example.com/someprotecteurl'
json_macaroon = {
u'identifier': u'macaroon-identifier',
u'caveats': [
{
u'cl': u'http://example.com/identity/v1/discharger',
u'vid': u'zgtQa88oS9UF45DlJniRaAUT4qqHhLxQzCeUU9N2O1Uu-'
u'yhFulgGbSA0zDGdkrq8YNQAxGiARA_-AGxyoh25kiTycb8u47pD',
u'cid': u'eyJUaGlyZFBhcnR5UHV'
}, {
u'cid': u'allow read-no-terms write'
}, {
u'cid': u'time-before 2158-07-19T14:29:14.312669464Z'
}],
u'location': u'charmstore',
u'signature': u'52d17cb11f5c84d58441bc0ffd7cc396'
u'5115374ce2fa473ecf06265b5d4d9e81'
}
discharge_token = [{
u'identifier': u'token-identifier===',
u'caveats': [{
u'cid': u'declared username someone'
}, {
u'cid': u'time-before 2158-08-15T15:55:52.428319076Z'
}, {
u'cid': u'origin '
}],
u'location': u'https://example.com/identity',
u'signature': u'5ae0e7a2abf806bdd92f510fcd3'
u'198f520691259abe76ffae5623dae048769ef'
}]
discharged_macaroon = {
u'identifier': u'discharged-identifier=',
u'caveats': [{
u'cid': u'declared uuid a1130b10-3deb-59b7-baf0-c2a3f83e7382'
}, {
u'cid': u'declared username someone'
}, {
u'cid': u'time-before 2158-07-19T15:55:52.432439055Z'
}],
u'location': u'',
u'signature': u'3513db5503ab17f9576760cd28'
u'ce658ce8bf6b43038255969fc3c1cd8b172345'
}
@urlmatch(path='.*/someprotecteurl')
def first_407_then_200(url, request):
    # Mock protected endpoint: returns 200 once a macaroon cookie is
    # presented, otherwise replies 407 with a "discharge required" body
    # carrying the macaroon the client must discharge.
    if request.headers.get('cookie', '').startswith('macaroon-'):
        return {
            'status_code': 200,
            'content': {
                'Value': 'some value'
            }
        }
    else:
        resp = response(status_code=407,
                        content={
                            'Info': {
                                'Macaroon': json_macaroon,
                                'MacaroonPath': '/',
                                'CookieNameSuffix': 'test'
                            },
                            'Message': 'verification failed: no macaroon '
                            'cookies in request',
                            'Code': 'macaroon discharge required'
                        },
                        headers={'Content-Type': 'application/json'})
        # Invoke httmock's response hook so the bakery auth layer sees a
        # fully-built requests.Response (triggers the discharge flow).
        return request.hooks['response'][0](resp)
@urlmatch(netloc='example.com:8000', path='.*/someprotecteurl')
def first_407_then_200_with_port(url, request):
    # Same as first_407_then_200 but matched on an explicit host:port and
    # passing request= to response() (needed for cookie-domain handling in
    # the with-port tests).
    if request.headers.get('cookie', '').startswith('macaroon-'):
        return {
            'status_code': 200,
            'content': {
                'Value': 'some value'
            }
        }
    else:
        resp = response(status_code=407,
                        content={
                            'Info': {
                                'Macaroon': json_macaroon,
                                'MacaroonPath': '/',
                                'CookieNameSuffix': 'test'
                            },
                            'Message': 'verification failed: no macaroon '
                            'cookies in request',
                            'Code': 'macaroon discharge required'
                        },
                        headers={'Content-Type': 'application/json'},
                        request=request)
        # Run httmock's response hook to finish constructing the response.
        return request.hooks['response'][0](resp)
@urlmatch(path='.*/someprotecteurl')
def valid_200(url, request):
    """Mock protected endpoint that always succeeds."""
    body = {'Value': 'some value'}
    return {'status_code': 200, 'content': body}
@urlmatch(path='.*/discharge')
def discharge_200(url, request):
    """Mock discharge endpoint that hands back the discharged macaroon."""
    return {
        'status_code': 200,
        'content': {'Macaroon': discharged_macaroon},
    }
@urlmatch(path='.*/discharge')
def discharge_401(url, request):
    """Mock discharge endpoint that demands browser interaction."""
    info = {
        'VisitURL': 'http://example.com/visit',
        'WaitURL': 'http://example.com/wait',
    }
    return {
        'status_code': 401,
        'content': {'Code': 'interaction required', 'Info': info},
        'headers': {'WWW-Authenticate': 'Macaroon'},
    }
@urlmatch(path='.*/visit')
def visit_200(url, request):
    """Mock visit endpoint advertising the interactive method."""
    return {'status_code': 200, 'content': {'interactive': '/visit'}}
@urlmatch(path='.*/wait')
def wait_after_401(url, request):
    """Mock wait endpoint; only the exact wait URL succeeds."""
    if request.url != 'http://example.com/wait':
        return {'status_code': 500}
    payload = {
        'DischargeToken': discharge_token,
        'Macaroon': discharged_macaroon,
    }
    return {'status_code': 200, 'content': payload}
@urlmatch(path='.*/wait')
def wait_on_error(url, request):
    """Mock wait endpoint that always fails with a server error."""
    payload = {
        'DischargeToken': discharge_token,
        'Macaroon': discharged_macaroon,
    }
    return {'status_code': 500, 'content': payload}
class TestBakery(TestCase):
    """End-to-end tests for httpbakery.Client's macaroon discharge flow,
    using httmock to simulate the protected service and the discharger."""

    def assert_cookie_security(self, cookies, name, secure):
        """Assert that cookie *name* exists in the jar with the given
        secure flag."""
        for cookie in cookies:
            if cookie.name == name:
                assert cookie.secure == secure
                break
        else:
            assert False, 'no cookie named {} found in jar'.format(name)

    def test_discharge(self):
        """Happy path: 407 triggers a discharge, cookie is stored."""
        client = httpbakery.Client()
        with HTTMock(first_407_then_200), HTTMock(discharge_200):
            resp = requests.get(ID_PATH,
                                cookies=client.cookies,
                                auth=client.auth())
            resp.raise_for_status()
        assert 'macaroon-test' in client.cookies.keys()
        # http URL -> cookie must not be marked Secure
        self.assert_cookie_security(client.cookies, 'macaroon-test',
                                    secure=False)

    @patch('webbrowser.open')
    def test_407_then_401_on_discharge(self, mock_open):
        """Interactive flow: discharger asks for a browser visit, then
        the wait endpoint yields the macaroon."""
        client = httpbakery.Client()
        with HTTMock(first_407_then_200), HTTMock(discharge_401), \
                HTTMock(wait_after_401):
            resp = requests.get(
                ID_PATH,
                cookies=client.cookies,
                auth=client.auth(),
            )
            resp.raise_for_status()
        mock_open.assert_called_once_with(u'http://example.com/visit', new=1)
        assert 'macaroon-test' in client.cookies.keys()

    @patch('webbrowser.open')
    def test_407_then_error_on_wait(self, mock_open):
        """A failing wait endpoint surfaces as InteractionError."""
        client = httpbakery.Client()
        with HTTMock(first_407_then_200), HTTMock(discharge_401),\
                HTTMock(wait_on_error):
            with self.assertRaises(httpbakery.InteractionError) as exc:
                requests.get(
                    ID_PATH,
                    cookies=client.cookies,
                    auth=client.auth(),
                )
        self.assertEqual(str(exc.exception),
                         'cannot start interactive session: cannot get '
                         'http://example.com/wait')
        mock_open.assert_called_once_with(u'http://example.com/visit', new=1)

    def test_407_then_no_interaction_methods(self):
        """With no interaction methods configured, interaction fails."""
        client = httpbakery.Client(interaction_methods=[])
        with HTTMock(first_407_then_200), HTTMock(discharge_401):
            with self.assertRaises(httpbakery.InteractionError) as exc:
                requests.get(
                    ID_PATH,
                    cookies=client.cookies,
                    auth=client.auth(),
                )
        self.assertEqual(str(exc.exception),
                         'cannot start interactive session: interaction '
                         'required but not possible')

    def test_407_then_unknown_interaction_methods(self):
        """A client offering only an unsupported method fails cleanly."""
        class UnknownInteractor(httpbakery.Interactor):
            def kind(self):
                return 'unknown'
        client = httpbakery.Client(interaction_methods=[UnknownInteractor()])
        with HTTMock(first_407_then_200), HTTMock(discharge_401),\
                HTTMock(visit_200):
            with self.assertRaises(httpbakery.InteractionError) as exc:
                requests.get(
                    ID_PATH,
                    cookies=client.cookies,
                    auth=client.auth(),
                )
        self.assertEqual(
            str(exc.exception),
            'cannot start interactive session: no methods supported; '
            'supported [unknown]; provided [interactive]'
        )

    def test_cookie_with_port(self):
        """Cookies are stored even when the URL carries an explicit port."""
        client = httpbakery.Client()
        with HTTMock(first_407_then_200_with_port):
            with HTTMock(discharge_200):
                resp = requests.get('http://example.com:8000/someprotecteurl',
                                    cookies=client.cookies,
                                    auth=client.auth())
                resp.raise_for_status()
        assert 'macaroon-test' in client.cookies.keys()

    def test_secure_cookie_for_https(self):
        """https URL -> cookie must be marked Secure."""
        client = httpbakery.Client()
        with HTTMock(first_407_then_200_with_port), HTTMock(discharge_200):
            resp = requests.get(
                'https://example.com:8000/someprotecteurl',
                cookies=client.cookies,
                auth=client.auth())
            resp.raise_for_status()
        assert 'macaroon-test' in client.cookies.keys()
        self.assert_cookie_security(client.cookies, 'macaroon-test',
                                    secure=True)
| go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/tests/test_bakery.py | Python | lgpl-3.0 | 9,725 | [
"VisIt"
] | a8705dd2dc7408d3a54218fae91ca944d3996d0fa86f72e78d351ba002157d81 |
#!/usr/bin/env python
"""
This script drives an NWChem calculation given a generic QM specification
"""
import json
import os
import pint
from run import *
CRDPARAMS = "nwchem.crdparams"
class QMMMRunner(QMRunner):
    """QMRunner variant for QM/MM calculations: the geometry comes from an
    MM coordinates/parameters file and the task line is tagged as 'mm'."""
    def _geom_block(self):
        """Return the NWChem ``mm`` block that loads the crd/params file.

        Raises IOError when the required input file is missing.
        """
        if not os.path.isfile(CRDPARAMS):
            raise IOError('Required input file %s not found' % CRDPARAMS)
        return "mm\ncrdparms load %s\nend" % CRDPARAMS
    def _taskblock(self):
        """Insert 'mm' after the first task token and append 'ignore'."""
        tokens = super(QMMMRunner, self)._taskblock().split()
        return ' '.join([tokens[0], 'mm'] + tokens[1:] + ['ignore'])
if __name__ == '__main__':
    # Delegate CLI handling to the shared runner entry point (from run.py).
    main(QMMMRunner)
| molecular-toolkit/chemistry-docker-images | makefiles/buildfiles/nwchem/runqmmm.py | Python | apache-2.0 | 650 | [
"NWChem"
] | 4d6c7dfedbf4c2f39f428cd684b94399347d64d7a0d86cce9f33c9365c5f9196 |
#
# -*- coding: utf-8 -*-
#
# (C) Copyright 2016 Karellen, Inc. (http://karellen.co/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pybuilder.core import use_plugin, init, before, Project, Author, Logger
from pybuilder.errors import BuildFailedException
from pybuilder.pip_utils import pip_install
use_plugin("pypi:karellen_pyb_plugin", ">=0.0.1.dev")
name = "karellen-sqlite"
version = "0.0.4.dev"
url = "https://github.com/karellen/karellen-sqlite"
description = "Please visit %s for more information!" % url
summary = "Karellen Core"
author = Author("Karellen, Inc", "supervisor@karellen.co")
authors = [author]
license = "Apache License, Version 2.0"
default_task = ["install_dependencies", "analyze", "sphinx_generate_documentation", "publish"]
obsoletes = "pysqlite"
@init
def set_properties(project: Project):
    """PyBuilder init hook: configure build properties for this project."""
    project.set_property("verbose", True)
    # Dependencies
    project.build_depends_on("karellen-testing", ">=0.0.1dev")
    # Cram Configuration
    project.set_property("cram_fail_if_no_tests", False)
    # Integration Tests Coverage is disabled since there are no integration tests
    project.set_property("integrationtest_coverage_threshold_warn", 0)
    project.set_property("integrationtest_coverage_branch_threshold_warn", 0)
    project.set_property("integrationtest_coverage_branch_partial_threshold_warn", 0)
    # Extend (not replace) the classifiers supplied by the base plugin.
    project.set_property("distutils_classifiers", project.get_property("distutils_classifiers") + [
        "Topic :: Database :: Database Engines/Servers",
        "Topic :: Software Development :: Libraries :: Python Modules"])
@before("run_integration_tests")
def install_self(project: Project, logger: Logger):
    """Install the freshly-built distribution before integration tests run."""
    logger.info("Installing %s..." % name)
    # A truthy return value from pip_install is treated as an install failure.
    if pip_install(project.expand_path("$dir_dist"), force_reinstall=True):
        raise BuildFailedException("Unable to install %s" % name)
| karellen/karellen-sqlite | build.py | Python | apache-2.0 | 2,363 | [
"VisIt"
] | 31f5e0b8b9efc74d35009f5e1ce5e8e4df7de56580bd48cda052a4490606765f |
# -*- coding: utf-8 -*-
"""
Subraph centrality and communicability betweenness.
"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import *
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Franck Kalala (franckkalala@yahoo.fr'])
__all__ = ['subgraph_centrality_exp',
'subgraph_centrality',
'communicability_betweenness_centrality',
'estrada_index'
]
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def subgraph_centrality_exp(G):
    r"""Return the subgraph centrality for each node of G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`. The weights
    decrease with path length. Each closed walk is associated with a
    connected subgraph ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes:dictionary
        Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality:
        Alternative algorithm of the subgraph centrality for each node of G.

    Notes
    -----
    This version of the algorithm exponentiates the adjacency matrix.
    The subgraph centrality of a node `u` in G can be found using
    the matrix exponential of the adjacency matrix of G [1]_,

    .. math::

        SC(u)=(e^A)_{uu} .

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph([(1,2),(1,5),(1,8),(2,3),(2,8),(3,4),(3,6),(4,5),(4,7),(5,6),(6,7),(7,8)])
    >>> sc = nx.subgraph_centrality_exp(G)
    >>> print(['%s %0.2f'%(node,sc[node]) for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
    """
    # alternative implementation that calculates the matrix exponential
    import scipy.linalg
    nodelist = list(G)  # ordering of nodes in matrix
    A = nx.to_numpy_matrix(G, nodelist)
    # convert to 0-1 matrix (edge weights are deliberately ignored)
    A[A != 0.0] = 1
    # expm of the dense ndarray (.A strips the np.matrix wrapper)
    expA = scipy.linalg.expm(A.A)
    # convert diagonal to dictionary keyed by node
    sc = dict(zip(nodelist, map(float, expA.diagonal())))
    return sc
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def subgraph_centrality(G):
    r"""Return subgraph centrality for each node in G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`. The weights
    decrease with path length. Each closed walk is associated with a
    connected subgraph ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality_exp:
        Alternative algorithm of the subgraph centrality for each node of G.

    Notes
    -----
    This version of the algorithm computes eigenvalues and eigenvectors
    of the adjacency matrix.

    Subgraph centrality of a node `u` in G can be found using
    a spectral decomposition of the adjacency matrix [1]_,

    .. math::

       SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},

    where `v_j` is an eigenvector of the adjacency matrix `A` of G
    corresponding to the eigenvalue `\lambda_j`.

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph([(1,2),(1,5),(1,8),(2,3),(2,8),(3,4),(3,6),(4,5),(4,7),(5,6),(6,7),(7,8)])
    >>> sc = nx.subgraph_centrality(G)
    >>> print(['%s %0.2f'%(node,sc[node]) for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730
    """
    import numpy
    import numpy.linalg
    nodelist = list(G)  # fixed node ordering for matrix rows/columns
    adj = nx.to_numpy_matrix(G, nodelist)
    adj[adj != 0.0] = 1  # binarize: edge weights are ignored
    # Symmetric eigendecomposition of the dense 0-1 adjacency matrix.
    eigenvalues, eigenvectors = numpy.linalg.eigh(adj.A)
    squared_components = numpy.array(eigenvectors) ** 2
    # SC(u) = sum_j (v_j^u)^2 * exp(lambda_j), computed for all u at once.
    sc_values = numpy.dot(squared_components, numpy.exp(eigenvalues))
    return dict(zip(nodelist, map(float, sc_values)))
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def communicability_betweenness_centrality(G, normalized=True):
    r"""Return subgraph communicability for all pairs of nodes in G.

    Communicability betweenness measure makes use of the number of walks
    connecting every pair of nodes as the basis of a betweenness centrality
    measure.

    Parameters
    ----------
    G: graph

    normalized : bool, optional (default=True)
        If True, rescale the values by ``1/((n-1)^2-(n-1))`` where ``n``
        is the number of nodes, so results lie between zero and one.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with communicability betweenness as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
    and `A` denote the adjacency matrix of `G`.

    Let `G(r)=(V,E(r))` be the graph resulting from
    removing all edges connected to node `r` but not the node itself.

    The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
    only in row and column `r`.

    The subraph betweenness of a node `r` is [1]_

    .. math::

        \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
        p\neq q, q\neq r,

    where
    `G_{prq}=(e^{A}_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
    involving node r,
    `G_{pq}=(e^{A})_{pq}` is the number of closed walks starting
    at node `p` and ending at node `q`,
    and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
    number of terms in the sum.

    The resulting `\omega_{r}` takes values between zero and one.
    The lower bound cannot be attained for a connected
    graph, and the upper bound is attained in the star graph.

    References
    ----------
    .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
       "Communicability Betweenness in Complex Networks"
       Physica A 388 (2009) 764-774.
       https://arxiv.org/abs/0905.4102

    Examples
    --------
    >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
    >>> cbc = nx.communicability_betweenness_centrality(G)
    """
    import numpy
    import scipy.linalg
    nodelist = list(G)  # ordering of nodes in matrix
    n = len(nodelist)
    A = nx.to_numpy_matrix(G, nodelist)
    # convert to 0-1 matrix
    A[A != 0.0] = 1
    expA = scipy.linalg.expm(A.A)
    mapping = dict(zip(nodelist, range(n)))
    cbc = {}
    for v in G:
        # remove row and col of node v
        i = mapping[v]
        row = A[i, :].copy()
        col = A[:, i].copy()
        A[i, :] = 0
        A[:, i] = 0
        # fraction of walks through v, pairwise for every (p, q)
        B = (expA - scipy.linalg.expm(A.A)) / expA
        # sum with row/col of node v and diag set to zero
        B[i, :] = 0
        B[:, i] = 0
        # scipy.diag was a deprecated alias of numpy.diag and has been
        # removed from modern SciPy releases; use numpy.diag directly.
        B -= numpy.diag(numpy.diag(B))
        cbc[v] = float(B.sum())
        # put row and col back
        A[i, :] = row
        A[:, i] = col
    # rescaling
    cbc = _rescale(cbc, normalized=normalized)
    return cbc
def _rescale(cbc, normalized):
    """Rescale communicability betweenness values in place.

    When *normalized* is exactly True and there are more than two nodes,
    every value is multiplied by ``1/((n-1)^2-(n-1))``.  The (possibly
    mutated) input dict is returned.
    """
    if normalized is not True:
        return cbc
    order = len(cbc)
    if order <= 2:
        # normalization factor is undefined for 1 or 2 nodes
        return cbc
    scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
    for node in cbc:
        cbc[node] *= scale
    return cbc
def estrada_index(G):
    r"""Return the Estrada index of a the graph G.

    The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    estrada index: float

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
    `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
    be a non-increasing ordering of the eigenvalues of its adjacency
    matrix `A`. The Estrada index is ([1]_, [2]_)

    .. math::
        EE(G)=\sum_{j=1}^n e^{\lambda _j}.

    References
    ----------
    .. [1] E. Estrada, "Characterization of 3D molecular structure",
       Chem. Phys. Lett. 319, 713 (2000).
       https://doi.org/10.1016/S0009-2614(00)00158-5
    .. [2] José Antonio de la Peñaa, Ivan Gutman, Juan Rada,
       "Estimating the Estrada index",
       Linear Algebra and its Applications. 427, 1 (2007).
       https://doi.org/10.1016/j.laa.2007.06.020

    Examples
    --------
    >>> G=nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
    >>> ei=nx.estrada_index(G)
    """
    # EE(G) is simply the total subgraph centrality.
    centrality = subgraph_centrality(G)
    return sum(centrality.values())
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py | Python | gpl-3.0 | 9,596 | [
"Desmond"
] | 4c7980c1647f02359a0452f8d1e0611451e23d415ebb8646485858c83cc70242 |
""" ProxyRenewalAgent keeps the proxy repository clean.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN ProxyRenewalAgent
:end-before: ##END
:dedent: 2
:caption: ProxyRenewalAgent options
"""
import concurrent.futures
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.FrameworkSystem.DB.ProxyDB import ProxyDB
DEFAULT_MAIL_FROM = "proxymanager@diracgrid.org"
class ProxyRenewalAgent(AgentModule):
    """Agent that purges expired entries from the proxy repository and,
    when MyProxy is enabled, renews credentials that are about to expire."""

    def initialize(self):
        """Read agent options, create the ProxyDB and log the configuration.

        Returns S_OK on success.
        """
        # Read once here for logging; execute() re-reads the options so a
        # configuration change takes effect without a restart.
        requiredLifeTime = self.am_getOption("MinimumLifeTime", 3600)
        renewedLifeTime = self.am_getOption("RenewedLifeTime", 54000)
        mailFrom = self.am_getOption("MailFrom", DEFAULT_MAIL_FROM)
        self.useMyProxy = self.am_getOption("UseMyProxy", False)
        self.proxyDB = ProxyDB(useMyProxy=self.useMyProxy, mailFrom=mailFrom)
        self.log.info(f"Minimum Life time      : {requiredLifeTime}")
        self.log.info(f"Life time on renew     : {renewedLifeTime}")
        if self.useMyProxy:
            self.log.info(f"MyProxy server         : {self.proxyDB.getMyProxyServer()}")
            self.log.info(f"MyProxy max proxy time : {self.proxyDB.getMyProxyMaxLifeTime()}")
        return S_OK()

    def __renewProxyForCredentials(self, userDN, userGroup):
        """Renew one (DN, group) credential from MyProxy; errors are logged,
        not raised (runs inside the thread pool)."""
        lifeTime = self.am_getOption("RenewedLifeTime", 54000)
        self.log.info(f"Renewing for {userDN}@{userGroup} {lifeTime} secs")
        res = self.proxyDB.renewFromMyProxy(userDN, userGroup, lifeTime=lifeTime)
        if not res["OK"]:
            self.log.error("Failed to renew proxy", f"for {userDN}@{userGroup} : {res['Message']}")
        else:
            self.log.info(f"Renewed proxy for {userDN}@{userGroup}")

    def execute(self):
        """The main agent execution method"""
        # Each purge step is best-effort: a failure is logged and the next
        # step still runs.
        self.log.verbose("Purging expired requests")
        res = self.proxyDB.purgeExpiredRequests()
        if not res["OK"]:
            self.log.error(res["Message"])
        else:
            self.log.info(f"Purged {res['Value']} requests")
        self.log.verbose("Purging expired tokens")
        res = self.proxyDB.purgeExpiredTokens()
        if not res["OK"]:
            self.log.error(res["Message"])
        else:
            self.log.info(f"Purged {res['Value']} tokens")
        self.log.verbose("Purging expired proxies")
        res = self.proxyDB.purgeExpiredProxies()
        if not res["OK"]:
            self.log.error(res["Message"])
        else:
            self.log.info(f"Purged {res['Value']} proxies")
        self.log.verbose("Purging logs")
        res = self.proxyDB.purgeLogs()
        if not res["OK"]:
            self.log.error(res["Message"])
        if self.useMyProxy:
            res = self.proxyDB.getCredentialsAboutToExpire(self.am_getOption("MinimumLifeTime", 3600))
            if not res["OK"]:
                return res
            data = res["Value"]
            self.log.info(f"Renewing {len(data)} proxies...")
            # Renewals are independent; fan them out over a small pool.
            with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
                futures = []
                for record in data:
                    userDN = record[0]
                    userGroup = record[1]
                    futures.append(executor.submit(self.__renewProxyForCredentials, userDN, userGroup))
        return S_OK()
| DIRACGrid/DIRAC | src/DIRAC/FrameworkSystem/Agent/ProxyRenewalAgent.py | Python | gpl-3.0 | 3,325 | [
"DIRAC"
] | cb49654656f23b6953953deeebd2aceacd6a0298495914fa7a5ea4ff5f7f342c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from pymatgen.io.cif import CifParser
from pymatgen.analysis.magnetism.jahnteller import *
import unittest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class JahnTellerTest(unittest.TestCase):
    """Unit tests for pymatgen's JahnTellerAnalyzer."""

    def setUp(self):
        self.jt = JahnTellerAnalyzer()

    def test_jahn_teller_species_analysis(self):
        """Expected JT effect magnitude per d-electron count / spin state
        for octahedral coordination."""
        # 1 d-shell electron
        m = self.jt.get_magnitude_of_effect_from_species('Ti3+', '', 'oct')
        self.assertEqual(m, "weak")
        # 2 d-shell electrons
        m = self.jt.get_magnitude_of_effect_from_species('Ti2+', '', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('V3+', '', 'oct')
        self.assertEqual(m, "weak")
        # 3 d-shell electrons: half-filled t2g, no JT distortion
        m = self.jt.get_magnitude_of_effect_from_species('V2+', '', 'oct')
        self.assertEqual(m, "none")
        m = self.jt.get_magnitude_of_effect_from_species('Cr3+', '', 'oct')
        self.assertEqual(m, "none")
        # 4 d-shell electrons: magnitude depends on spin state
        m = self.jt.get_magnitude_of_effect_from_species('Cr2+', 'high', 'oct')
        self.assertEqual(m, "strong")
        m = self.jt.get_magnitude_of_effect_from_species('Cr2+', 'low', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('Mn3+', 'high', 'oct')
        self.assertEqual(m, "strong")
        m = self.jt.get_magnitude_of_effect_from_species('Mn3+', 'low', 'oct')
        self.assertEqual(m, "weak")
        # 5 d-shell electrons
        m = self.jt.get_magnitude_of_effect_from_species('Mn2+', 'high', 'oct')
        self.assertEqual(m, "none")
        m = self.jt.get_magnitude_of_effect_from_species('Mn2+', 'low', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('Fe3+', 'high', 'oct')
        self.assertEqual(m, "none")
        m = self.jt.get_magnitude_of_effect_from_species('Fe3+', 'low', 'oct')
        self.assertEqual(m, "weak")
        # 6 d-shell electrons
        m = self.jt.get_magnitude_of_effect_from_species('Fe2+', 'high', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('Fe2+', 'low', 'oct')
        self.assertEqual(m, "none")
        m = self.jt.get_magnitude_of_effect_from_species('Co3+', 'high', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('Co3+', 'low', 'oct')
        self.assertEqual(m, "none")
        # 7 d-shell electrons
        m = self.jt.get_magnitude_of_effect_from_species('Co2+', 'high', 'oct')
        self.assertEqual(m, "weak")
        m = self.jt.get_magnitude_of_effect_from_species('Co2+', 'low', 'oct')
        self.assertEqual(m, "strong")
        # 8 d-shell electrons
        m = self.jt.get_magnitude_of_effect_from_species('Ni2+', '', 'oct')
        self.assertEqual(m, "none")
        # 9 d-shell electrons: classic strong JT ion (Cu2+)
        m = self.jt.get_magnitude_of_effect_from_species('Cu2+', '', 'oct')
        self.assertEqual(m, "strong")
        # 10 d-shell electrons: filled shell, no JT distortion
        m = self.jt.get_magnitude_of_effect_from_species('Cu+', '', 'oct')
        self.assertEqual(m, "none")
        m = self.jt.get_magnitude_of_effect_from_species('Zn2+', '', 'oct')
        self.assertEqual(m, "none")

    def test_jahn_teller_structure_analysis(self):
        """Structure-level detection and full analysis dict for LiFePO4."""
        parser = CifParser(os.path.join(test_dir, 'LiFePO4.cif'))
        LiFePO4 = parser.get_structures()[0]
        parser = CifParser(os.path.join(test_dir, 'Fe3O4.cif'))
        Fe3O4 = parser.get_structures()[0]
        self.assertTrue(self.jt.is_jahn_teller_active(LiFePO4))
        self.assertTrue(self.jt.is_jahn_teller_active(Fe3O4))
        LiFePO4_analysis = {
            'active': True,
            'strength': 'weak',
            'sites': [
                {
                    'ligand': 'O2-',
                    'ligand_bond_length_spread': 0.2111,
                    'ligand_bond_lengths': [2.1382,
                                            2.0840,
                                            2.0863,
                                            2.2383,
                                            2.2951,
                                            2.2215],
                    'strength': 'weak',
                    'motif': 'oct',
                    'motif_order_parameter': 0.1441,
                    'site_indices': [4, 5, 6, 7],
                    'species': 'Fe2+',
                    'spin_state': 'unknown'
                }
            ]
        }
        self.assertDictEqual(LiFePO4_analysis, self.jt.get_analysis(LiFePO4))

    def test_mu_so(self):
        """Spin-only magnetic moment: sqrt(n(n+2)) in Bohr magnetons."""
        SpeciesCo = Specie(symbol='Co', oxidation_state=4)
        self.assertAlmostEqual(np.sqrt(3), JahnTellerAnalyzer.mu_so(SpeciesCo, 'oct', 'low'))
        self.assertAlmostEqual(np.sqrt(35), JahnTellerAnalyzer.mu_so(SpeciesCo, 'oct', 'high'))
        # Species without a partially-filled d shell -> no moment defined
        SpeciesNa = Specie(symbol='Na', oxidation_state=1)
        self.assertEqual(None, JahnTellerAnalyzer.mu_so(SpeciesNa, 'oct', 'high'))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| nisse3000/pymatgen | pymatgen/analysis/magnetism/tests/test_jahnteller.py | Python | mit | 5,117 | [
"pymatgen"
] | 63b4533ebb5f4f89abc08abcdd9d4d4f46ac62d97e2d98d1ac4703d88583f5d9 |
import time
#from telegramFormat import *
from api_definitions import *
import itertools
import struct
import io
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# #file handler for logs
# handler = logging.FileHandler('commands.log')
# handler.setLevel(logging.INFO)
# # create a logging format
# formatter = logging.Formatter('%(asctime)s - %(module)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
#logger.disabled = True
# GLOBAL VARIABLES
MILK_OUTLET_DATA_SIZE = 3
MILK_TUBE_LENGTH = 350
SCREEN_RINSE_DATA_SIZE = 2
RIGHT_SIDE = 1
LEFT_SIDE = 0
MAX_ACK_RETRIES = 3
############################################# START OF CREATING AND READING TELEGRAMS ##########################
def CreateTelegram(PIP_p, PIE_p, PN_p, SA_p, DA_p, MI_p, MP_p, DL_p, dataPackets_p, data_p):
    """Build a framed telegram for the coffee machine protocol.

    Header bytes are PIP, PIE, PN, SA, DA, MI followed by MP and DL as
    16-bit little-endian values.  When dataPackets_p is truthy the payload
    bytes in data_p are appended.  A 16-bit CRC (little-endian) is added,
    reserved bytes are escaped via StuffAndShift, and the result is framed
    with SOH/EOT.  Returns the telegram as bytes.
    """
    array = [
        PIP_p,
        PIE_p,
        PN_p,
        SA_p,
        DA_p,
        MI_p,
        MP_p & 0xff,
        ((MP_p >> 8) & 0xff),
        DL_p & 0xff,
        ((DL_p >> 8) & 0xff),
    ]
    if(dataPackets_p):
        for var in data_p:
            array.append(var)
    # CRC is computed over header + payload, before byte stuffing.
    CRC = API_CalculateCRC(array, len(array))
    array.append(CRC & 0xff)
    array.append((CRC >> 8) & 0xff)
    # Escape reserved bytes, then frame with SOH ... EOT.
    shifted = StuffAndShift(array)
    finalTelegram = [DPT_SpecialChar_t.SOH_e.value] + \
        shifted + [DPT_SpecialChar_t.EOT_e.value]
    logger.info("\nSent from Pi: " + str(finalTelegram))
    return bytes(finalTelegram)
def CreateACKPacket(PIP_p, PIE_p, PN_p, SA_p, DA_p):
    """Build a framed ACK packet (header only: no payload, no CRC)."""
    header = [PIP_p, PIE_p, PN_p, SA_p, DA_p]
    stuffed = StuffAndShift(header)
    framed = [DPT_SpecialChar_t.SOH_e.value]
    framed.extend(stuffed)
    framed.append(DPT_SpecialChar_t.EOT_e.value)
    return bytes(framed)
def StuffAndShift(arrayToShift):
    """Escape every reserved protocol byte in *arrayToShift*.

    Each byte matching a DPT_SpecialChar_t value is replaced by the DLE
    byte followed by the original byte XOR-ed with ShiftXOR.  Inserted
    escape bytes are never re-examined.  Returns a new list.
    """
    reserved = set(member.value for member in DPT_SpecialChar_t)
    dle = DPT_SpecialChar_t.DLE_e.value
    shift_mask = DPT_SpecialChar_t.ShiftXOR_e.value
    stuffed = []
    for byte in arrayToShift:
        if byte in reserved:
            stuffed.append(dle)
            stuffed.append(byte ^ shift_mask)
        else:
            stuffed.append(byte)
    return stuffed
def DeStuffAndShift(arrayToDeShift):
    """Undo StuffAndShift: collapse each DLE + shifted byte pair back
    into the original reserved byte.  Returns a new list."""
    dle = DPT_SpecialChar_t.DLE_e.value
    shift_mask = DPT_SpecialChar_t.ShiftXOR_e.value
    restored = []
    index = 0
    while index < len(arrayToDeShift):
        byte = arrayToDeShift[index]
        if byte == dle:
            # The byte after DLE carries the XOR-shifted original value.
            restored.append(arrayToDeShift[index + 1] ^ shift_mask)
            index += 2
        else:
            restored.append(byte)
            index += 1
    return restored
def ByteToBitArray(num, size=8):
    """Return *num* as a list of bit ints (MSB first), zero-padded on the
    left to at least *size* bits."""
    bits = format(num, 'b').zfill(size)
    return [int(bit) for bit in bits]
def LowNibble(bitArray):
    """Return the low 4 bits of a byte value."""
    return bitArray % 0x10
def HighNibble(bitArray):
    """Return the high 4 bits of a byte value."""
    return bitArray // 0x10
############################################# END OF CREATING AND READING TELEGRAMS ###########################
############################################# START OF SENDING PACKETS #######################################
def WaitTillReady(port, seqNumber):
    """Poll the machine until CheckIfReady reports ready.

    Returns the next unused sequence number.
    NOTE(review): this loops forever if the machine never becomes ready --
    confirm whether a retry limit/timeout is needed.
    """
    ready = False
    readyCnt = 1
    while(ready == False):
        print("\n\n\n\n")
        logger.info("ATTEMPT" + str(readyCnt) + "\n\n\n")
        ready = CheckIfReady(port,seqNumber)
        seqNumber +=1
        readyCnt +=1
    return seqNumber
def CheckIfReady(port, seqNumber):
    """Poll the coffee machine and report whether every process status
    nibble signals 'ready' (value 1).

    Returns True when all '* Status' entries equal 1, False otherwise.
    """
    statuses = GetMachineStatus(port, seqNumber)[0]
    logger.info("\nReading machine status...")
    logger.debug(str(statuses))
    # 'Machine Status' is a raw bit field, not a per-process status nibble.
    del statuses['Machine Status']
    statusCodes = [val for key, val in statuses.items() if 'status' in key.lower()]
    logger.debug(str(statusCodes))  # was a stray print() debug statement
    if all(v == 1 for v in statusCodes):
        # Was logger.info("\Success"): "\S" is an invalid escape and the
        # message literally logged "\Success" instead of a newline + Success.
        logger.info("\nSuccess")
        logger.info("\n The coffee machine is ready")
        return True
    else:
        logger.error("\nSomething is wrong, one or more statuses indicate they are not ready")
        return False
def GetMachineStatus(port, seqNumber):
    """Request and decode the machine status telegram.

    The request is sent twice: once to complete the ACK handshake and once
    (with the next sequence number) to obtain the actual response.
    Returns the [dataBits, machineStatusData] pair from GetStatusArrays.
    """
    # Get the request telegram
    logger.info("\n\n Sending a request to get the machine status...")
    telegram = GetStatusTelegram(seqNumber)
    # Get an ACK packet so the coffee machine is ready to send a response
    InitRequest(port,seqNumber,telegram)
    logger.info("\nResending the request to receive response...")
    # Change the sequence number for the new packet
    seqNumber += 1
    telegram = GetStatusTelegram(seqNumber)
    # Get the response data
    responseData = DoRequest(port,seqNumber,telegram)
    return GetStatusArrays(responseData)
def InitRequest(port,seqNumber,telegram):
    """Perform the ACK handshake for *telegram*, then flush the port."""
    WaitTillACK(port, seqNumber, telegram)
    ClearPortContents(port)
def WaitTillACK(port, seqNumber, telegram):
    """Resend *telegram* until the machine ACKs or retries are exhausted.

    NOTE(review): the <= comparison allows MAX_ACK_RETRIES + 1 attempts,
    and the locally incremented seqNumber is neither re-encoded into
    *telegram* nor returned to the caller -- confirm both are intended.
    Gives no indication to the caller when all retries fail.
    """
    ACKReceived = False
    retryCnt = 0
    while(ACKReceived == False and retryCnt <= MAX_ACK_RETRIES):
        ACKReceived = CheckForAck(port, seqNumber, telegram)
        if(ACKReceived):
            break
        time.sleep(0.8) # might be problem
        seqNumber+=1
        retryCnt +=1
def CheckForAck(port, seqNumber, telegram):
    """Send *telegram* once and check whether the reply is an ACK packet.

    Returns True on ACK, False when nothing was received.
    NOTE(review): when a non-empty reply is not an ACK the function falls
    through and implicitly returns None (falsy) -- confirm this is intended.
    The seqNumber parameter is currently unused.
    """
    # Send Command/Request
    port.write(bytes(telegram)) # send command to coffee machine
    time.sleep(0.1)
    # Read response
    cmResponse = port.read_all() # read from Pi
    # Convert response to telegram array (note: rebinds the 'telegram' param)
    telegram = ResponseToTelegramArray(cmResponse)
    logger.info("\n Received from coffee machine: " + str(telegram))
    if(len(telegram) > 0 ):
        if(telegram[2] == PacketTypes.ACK.value):
            logger.info("\nCoffee machine sends ACK to command/request.. message received")
            return True
    else:
        logger.info("\n Received no ACK packet from coffee machine...sending packet again")
        return False
# Check if machine is ready before sending any commands/requests
def DoCommand(port,seqNumber,telegram):
    """Complete the ACK handshake, flush the port, then send *telegram*.

    Always returns 0 (no response is read for commands).
    """
    WaitTillACK(port,seqNumber,telegram)
    ClearPortContents(port)
    port.write(bytes(telegram))
    return 0
def DoRequest(port,seqNumber, telegram):
    """Send *telegram* repeatedly until a RESPONSE packet is received.

    Returns the parsed response data (with the leading 7 bytes stripped),
    or 0 when a read yields no data.
    NOTE(review): the '[7:]' offset presumably skips the frame header up to
    the payload -- confirm against the protocol spec.  A non-empty reply
    that is not a RESPONSE keeps the loop spinning indefinitely.
    """
    machineStatusReceived = False
    responseData = 0
    while machineStatusReceived == False:
        port.write(bytes(telegram))
        time.sleep(0.15)
        response = port.read_all()
        responseData = ResponseToTelegramArray(response)[7:]
        if(len(responseData) > 0):
            # decode the message to see if it is indeed a response
            if (responseData[2] == PacketTypes.RESPONSE.value):
                machineStatusReceived = True
                logger.info("\nResponse successfully received:")
                logger.info("\n Response received from coffee machine: " + str(responseData))
        else:
            return 0
        # Do something
    return responseData
def ClearPortContents(port):
    """Drop any pending bytes in the serial input and output buffers."""
    port.reset_input_buffer()
    port.reset_output_buffer()
def GetStatusArrays(telegram):
    """Decode a GetStatus response telegram into two dicts.

    Returns [dataBits, machineStatusData]: `dataBits` maps each dispenser
    field to its raw byte (actions/statuses packed one nibble each), and
    `machineStatusData` expands the machine-status byte into single bits.
    """
    startOfDataBits = 11 # SOH = 0, PIP = 1, PIE = 2 ... DL = (9+10) - 16 bit
    # Actions live in the high nibble, statuses in the low nibble of the
    # same byte for each dispenser.
    dataBits = {'Machine Status' : telegram[startOfDataBits], \
        'Coffee Left Action': HighNibble(telegram[startOfDataBits + 1]), \
        'Coffee Left Status': LowNibble(telegram[startOfDataBits + 1]), \
        'Coffee Right Action': HighNibble(telegram[startOfDataBits + 2]), \
        'Coffee Right Status': LowNibble(telegram[startOfDataBits + 2]), \
        'Steam Left Action': HighNibble(telegram[startOfDataBits + 3]), \
        'Steam Left Status': LowNibble(telegram[startOfDataBits + 3]), \
        'Steam Right Action': HighNibble(telegram[startOfDataBits + 4]), \
        'Steam Right Status': LowNibble(telegram[startOfDataBits + 4]), \
        'Water Action': HighNibble(telegram[startOfDataBits + 5]), \
        'Water Status': LowNibble(telegram[startOfDataBits + 5]), \
        'Coffee Left Process': telegram[startOfDataBits + 6], \
        'Coffee Right Process': telegram[startOfDataBits + 7], \
        'Steam Left Process': telegram[startOfDataBits + 8], \
        'Steam Right Process': telegram[startOfDataBits + 9], \
        'Water Process': telegram[startOfDataBits + 10], \
        }
    ## Might need to flip if big-endian architecture
    machineStatus = ByteToBitArray(dataBits['Machine Status'])
    # Bit 3 is unused here; only bits 0-2, 4 and 5 are exposed.
    machineStatusData = {
        'Just Reset': machineStatus[0], \
        'Request Set': machineStatus[1], \
        'Info Message Set': machineStatus[2], \
        'Product Dump Left': machineStatus[4], \
        'Product Dump Right': machineStatus[5], \
        }
    return [dataBits, machineStatusData]
def ResponseToTelegramArray(response):
    """Decode raw serial bytes into a de-stuffed integer telegram array."""
    return DeStuffAndShift(TelegramToIntArray(response))
def TelegramToIntArray(telegram):
    """Return the telegram's bytes as a plain list of ints.

    The previous implementation unpacked into a list nested inside another
    list and flattened it with itertools.chain; a single struct.unpack call
    is equivalent and clearer.
    """
    return list(struct.unpack('B' * len(telegram), telegram))
def readtilleol(port):
    """Read one byte at a time until EOT (0x04) is seen or the port stops
    returning data; return everything read (terminator included)."""
    terminator = b'\x04'
    buf = bytearray()
    while True:
        byte = port.read(1)
        if not byte:
            # Port returned nothing -- give up with what we have.
            break
        buf += byte
        if buf[-len(terminator):] == terminator:
            break
    return bytes(buf)
############################################# END OF SENDING PACKETS #########################################
############################################# START OF PRODUCTS ##############################################
def DoProduct(side,dataDict,seqNum):
    """Build a product COMMAND telegram from a parameter dict.

    16-bit quantities (water quantity, cake thickness, milk qty, steam time,
    air stop time, pump speeds, milk/coffee delay) are split little-endian
    into two payload bytes.  The field order below must match the machine's
    product record layout exactly -- do not reorder.
    """
    data = [dataDict["Product Type: "],dataDict["Product Process: "],dataDict["Water Quantity: "] & 0xff, \
        ((dataDict["Water Quantity: "] >> 8) & 0xff),dataDict["Bean Hopper: "], \
        dataDict["Cake Thickness: "] & 0xff, ((dataDict["Cake Thickness: "] >> 8) & 0xff), \
        dataDict["Tamping: "], dataDict["Pre-Infusion: "],dataDict["Relax Time: "], dataDict["Second Tamping: "], \
        dataDict["Milk Qty: "] & 0xff, ((dataDict["Milk Qty: "] >> 8) & 0xff), \
        dataDict["Milk Temperature: "], dataDict["Milk Percent: "], dataDict["Milk Seq: "], dataDict["Latte Macchiato Time: "], \
        dataDict["Foam Sequence: "], \
        dataDict["Steam Time: "] & 0xff, ((dataDict["Steam Time: "] >> 8) & 0xff), \
        dataDict["Steam Temperature: "], dataDict["Everfoam Mode: "], dataDict["Air Stop Temperature: "], \
        dataDict["Air Stop Time: "] & 0xff, ((dataDict["Air Stop Time: "] >> 8) & 0xff), \
        dataDict["Pump Speed Milk: "] & 0xff, ((dataDict["Pump Speed Milk: "] >> 8) & 0xff), \
        dataDict["Pump Speed Foam: "] & 0xff, ((dataDict["Pump Speed Foam: "] >> 8) & 0xff), \
        dataDict["param 23: "], \
        dataDict["Milk/Coffee Delay: "] & 0xff, ((dataDict["Milk/Coffee Delay: "] >> 8) & 0xff)]
    #print(len(data))
    # 0x42/0x41 are the fixed PIP/PIE bytes; 0x02 is the product command id.
    telegram = CreateTelegram(0x00,PacketTypes.COMMAND.value,seqNum,0x42,0x41,0x02,side,len(data),True,data)
    #print([hex(x) for x in TelegramToIntArray(telegram)])
    #print(len(TelegramToIntArray(telegram)))
    return telegram
# Product 1 Coffee
def DoCoffee(side,seqNum):
    """Build the telegram for a standard coffee on the given side.

    Milk/steam fields are filled with inert defaults; plain coffee does not
    use them.
    """
    dataDict = {"Product Type: ":ProductType_t.Coffee_e.value, "Product Process: ":0, \
        "Water Quantity: ":135 , "Bean Hopper: ":1, "Cake Thickness: ":140, \
        "Tamping: ":64, "Pre-Infusion: ": 0, "Relax Time: ": 0, "Second Tamping: ":0, \
        "Milk Qty: ":0, "Milk Temperature: ":255,"Milk Percent: ":0,"Milk Seq: ":MilkSequence_t.MilkSeqUndef_e.value,\
        "Latte Macchiato Time: ":1, "Foam Sequence: ":0, "Steam Time: ":1,\
        "Steam Temperature: ":30, "Everfoam Mode: ":0, "Air Stop Temperature: ":0, "Air Stop Time: ":10, \
        "Pump Speed Milk: ":1500, "Pump Speed Foam: ":3000, "param 23: ":0, "Milk/Coffee Delay: ":5}
    telegram = DoProduct(side,dataDict,seqNum)
    return telegram
# Side-specific convenience wrappers around DoCoffee.
def DoCoffeeLeftTelegram(seqNum):
    return DoCoffee(LEFT_SIDE, seqNum)
def DoCoffeeRightTelegram(seqNum):
    return DoCoffee(RIGHT_SIDE, seqNum)
# Product 2 Espresso
def DoEspresso(side,seqNum):
    """Build the telegram for an espresso on the given side.

    Less water than coffee (50 vs 135) with pre-infusion and second tamping;
    milk/steam fields carry inert defaults.
    """
    dataDict = {"Product Type: ":ProductType_t.Espresso_e.value, "Product Process: ":0, \
        "Water Quantity: ":50 , "Bean Hopper: ":0, "Cake Thickness: ":140, \
        "Tamping: ":64, "Pre-Infusion: ": 8, "Relax Time: ": 20, "Second Tamping: ":20, \
        "Milk Qty: ":0, "Milk Temperature: ":255,"Milk Percent: ":0,"Milk Seq: ":MilkSequence_t.MilkSeqUndef_e.value,\
        "Latte Macchiato Time: ":1, "Foam Sequence: ":0, "Steam Time: ":1,\
        "Steam Temperature: ":30, "Everfoam Mode: ":0, "Air Stop Temperature: ":0, "Air Stop Time: ":10, \
        "Pump Speed Milk: ":1500, "Pump Speed Foam: ":3000, "param 23: ":0, "Milk/Coffee Delay: ":5}
    telegram = DoProduct(side,dataDict,seqNum)
    return telegram
# Side-specific convenience wrappers around DoEspresso.
def DoEspressoLeftTelegram(seqNum):
    return DoEspresso(LEFT_SIDE, seqNum)
def DoEspressoRightTelegram(seqNum):
    return DoEspresso(RIGHT_SIDE, seqNum)
# Product 3 Hot water
def DoHotWater(side,seqNum):
    """Build the telegram for hot water on the given side.

    Uses product process 2; the coffee-specific fields still carry values
    the machine expects in the fixed record layout.
    """
    dataDict = {"Product Type: ":ProductType_t.HotWater_e.value, "Product Process: ":2, \
        "Water Quantity: ":100 , "Bean Hopper: ":0, "Cake Thickness: ":140, \
        "Tamping: ":64, "Pre-Infusion: ": 8, "Relax Time: ": 20, "Second Tamping: ":20, \
        "Milk Qty: ":10, "Milk Temperature: ":255,"Milk Percent: ":1,"Milk Seq: ":MilkSequence_t.MilkSeqUndef_e.value,\
        "Latte Macchiato Time: ":1, "Foam Sequence: ":0, "Steam Time: ":1,\
        "Steam Temperature: ":30, "Everfoam Mode: ":0, "Air Stop Temperature: ":0, "Air Stop Time: ":10, \
        "Pump Speed Milk: ":1500, "Pump Speed Foam: ":3000, "param 23: ":0, "Milk/Coffee Delay: ":5}
    telegram = DoProduct(side,dataDict,seqNum)
    return telegram
# Side-specific convenience wrappers around DoHotWater.
def DoHotWaterLeftTelegram(seqNum):
    return DoHotWater(LEFT_SIDE, seqNum)
def DoHotWaterRightTelegram(seqNum):
    return DoHotWater(RIGHT_SIDE, seqNum)
############################################# END OF PRODUCTS ################################################
####################################### START OF TELEGRAMS ####################################################
def DoRinseTelegram(MI, MP, DL, seqNum):
    """Build a rinse COMMAND telegram for command id MI, module MP."""
    return CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42,
                          0x41, MI, MP, DL, False, [0])
# Side-specific rinse wrappers.
def DoRinseLeftTelegram(seqNum):
    return DoRinseTelegram(API_Command_t.DoRinse_e.value, LEFT_SIDE, 0, seqNum)
def DoRinseRightTelegram(seqNum):
    return DoRinseTelegram(API_Command_t.DoRinse_e.value, RIGHT_SIDE, 0, seqNum)
def StartCleanTelegram(seqNum):
    """Build the telegram that starts the cleaning program."""
    return CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42, 0x41,
                          API_Command_t.StartCleaning_e.value, 0, 0, False, [0])
def StopProcessTelegram(module, seqNum):
    """Build the telegram that stops the process running on `module`."""
    return CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42, 0x41,
                          API_Command_t.Stop_e.value, module, 0x00, False, [0])
def StopAllProcessTelegram(seqNum):
    """Build the telegram that stops all running processes (module 0)."""
    return CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42, 0x41,
                          API_Command_t.Stop_e.value, 0, 0x00, False, [0])
def RinseMilkOutletTelegram(rinseMode, side, seqNum):
    """Build a milk-outlet rinse telegram.

    The payload carries the rinse mode followed by the 16-bit milk tube
    length, little-endian.  Per the wrappers below: 0 = tubes and outlet,
    1 = outlet only, 2 = tubes only.
    """
    # BUG FIX: the original line read `array[0] * MILK_OUTLET_DATA_SIZE`,
    # which raised NameError because `array` was never created.
    array = [0] * MILK_OUTLET_DATA_SIZE
    array[0] = rinseMode
    array[1] = MILK_TUBE_LENGTH & 0xff
    array[2] = ((MILK_TUBE_LENGTH >> 8) & 0xff)
    telegram = CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42, 0x41,
                              API_Command_t.MilkOutletRinse_e.value, side, MILK_OUTLET_DATA_SIZE, True, array)
    return telegram
# Mode/side wrappers around RinseMilkOutletTelegram:
# mode 1 = outlet only, mode 2 = tubes only, mode 0 = tubes and outlet.
def RinseRightMilkOutletTelegram(seqNum):
    return RinseMilkOutletTelegram(1, RIGHT_SIDE, seqNum)
def RinseLeftMilkOutletTelegram(seqNum):
    return RinseMilkOutletTelegram(1, LEFT_SIDE, seqNum)
def RinseRightTubesTelegram(seqNum):
    return RinseMilkOutletTelegram(2, RIGHT_SIDE, seqNum)
def RinseLeftTubesTelegram(seqNum):
    return RinseMilkOutletTelegram(2, LEFT_SIDE, seqNum)
def RinseRightTubesAndOutletTelegram(seqNum):
    return RinseMilkOutletTelegram(0, RIGHT_SIDE, seqNum)
def RinseLeftTubesAndOutletTelegram(seqNum):
    return RinseMilkOutletTelegram(0, LEFT_SIDE, seqNum)
def DoScreenRinseTelegram(side, seqNum):
    """Build a screen-rinse telegram for the given side.

    Payload: [cycles, repetitions] padded to SCREEN_RINSE_DATA_SIZE.
    """
    # BUG FIX: the original line read `array[0] * SCREEN_RINSE_DATA_SIZE`,
    # which raised NameError because `array` was never created.
    array = [0] * SCREEN_RINSE_DATA_SIZE
    array[0] = 3 # screen rinse cycles
    array[1] = 10 # repetitions
    telegram = CreateTelegram(0x00, PacketTypes.COMMAND.value, seqNum, 0x42, 0x41,
                              API_Command_t.ScreenRinse_e.value, side, SCREEN_RINSE_DATA_SIZE, True, array)
    return telegram
# Side-specific screen-rinse wrappers.
def DoRightScreenRinseTelegram(seqNum):
    return DoScreenRinseTelegram(RIGHT_SIDE, seqNum)
def DoLeftScreenRinseTelegram(seqNum):
    return DoScreenRinseTelegram(LEFT_SIDE, seqNum)
def GetStatusTelegram(seqNum):
    """Build the GetStatus REQUEST telegram."""
    return CreateTelegram(0x00, PacketTypes.REQUEST.value, seqNum, 0x42, 0x41,
                          API_Command_t.GetStatus_e.value, 0, 0, False, [0])
####################################### END OF TELEGRAMS ######################################################
####################################### START OF REQUESTS #####################################################
def GetRequest(seqNum,port):
    """Fetch the machine's pending requests via a GetRequests request."""
    def build(sn):
        return CreateTelegram(0x00, PacketTypes.REQUEST.value, sn, 0x42, 0x41,
                              API_Command_t.GetRequests_e.value, 0, 0, False, [0])
    logger.info("\n\n Sending a request to get the set requests...")
    # Handshake first so the machine is ready to send a response.
    InitRequest(port, seqNum, build(seqNum))
    logger.info("\nResending the request to receive response...")
    # Rebuild with the next sequence number and collect the response.
    seqNum += 1
    return DoRequest(port, seqNum, build(seqNum))
def GetInfoMessage(seqNum,port):
    """Fetch the machine's info messages via a GetInfoMessages request."""
    payload = [0] * 3
    def build(sn):
        return CreateTelegram(0x00, PacketTypes.REQUEST.value, sn, 0x42, 0x41,
                              API_Command_t.GetInfoMessages_e.value, 0, 3, True, payload)
    logger.info("\n\n Sending a request to get the info messages...")
    # Handshake first so the machine is ready to send a response.
    InitRequest(port, seqNum, build(seqNum))
    logger.info("\nResending the request to receive response...")
    # Rebuild with the next sequence number and collect the response.
    seqNum += 1
    return DoRequest(port, seqNum, build(seqNum))
def DisplayAction(action, seqNum):
    """Build a DisplayAction request telegram for the given action code."""
    return CreateTelegram(0x00, PacketTypes.REQUEST.value, seqNum, 0x42, 0x41,
                          API_Command_t.DisplayAction_e.value, action, 0, False, [0])
def GetProductDump(side, seqNum,port):
    """Fetch a product dump for `side` via a GetProductDump request."""
    def build(sn):
        return CreateTelegram(0x00, PacketTypes.REQUEST.value, sn, 0x42, 0x41,
                              API_Command_t.GetProductDump_e.value, side, 0, False, [0])
    # NOTE: log text says "info messages" in the original; kept verbatim.
    logger.info("\n\n Sending a request to get the info messages...")
    # Handshake first so the machine is ready to send a response.
    InitRequest(port, seqNum, build(seqNum))
    logger.info("\nResending the request to receive response...")
    # Rebuild with the next sequence number and collect the response.
    seqNum += 1
    return DoRequest(port, seqNum, build(seqNum))
# Side-specific product-dump wrappers.
def GetProductDumpRight(seqNum,port):
    return GetProductDump(RIGHT_SIDE, seqNum, port)
def GetProductDumpLeft(seqNum,port):
    return GetProductDump(LEFT_SIDE, seqNum, port)
def GetSensorValues(seqNum,port):
    """Fetch the machine's sensor values via a GetSensorValues request."""
    def build(sn):
        return CreateTelegram(0x00, PacketTypes.REQUEST.value, sn, 0x42, 0x41,
                              API_Command_t.GetSensorValues_e.value, 0, 0, False, [0])
    logger.info("\n\n Sending a request to get the sensor values...")
    # Handshake first so the machine is ready to send a response.
    InitRequest(port, seqNum, build(seqNum))
    logger.info("\nResending the request to receive response...")
    # Rebuild with the next sequence number and collect the response.
    seqNum += 1
    return DoRequest(port, seqNum, build(seqNum))
####################################### END OF REQUESTS ######################################################
####################################### START OF CRC CALC ####################################################
# CRC-16 lookup table, one entry per byte value (256 entries).
crcPolynomTable = \
[
0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040
]
def API_CalculateCRC(data_p, dataLength):
    """Return the table-driven CRC-16 (initial value 0xFFFF, no final XOR)
    of the first `dataLength` bytes of `data_p`."""
    checkSum = 0xFFFF
    for i in range(dataLength):
        checkSum = (checkSum >> 8) ^ crcPolynomTable[(checkSum ^ data_p[i]) & 0xFF]
    return checkSum
####################################### END OF CRC CALC ######################################################
| bojandjukic1/SAP_CORK | Coffee_Machine/CGI Call and Script - New/commands.py | Python | gpl-3.0 | 24,743 | [
"ESPResSo"
] | a14c5cab31ed48904defeef916769f5b0e8693fe71facd168696e5657102e213 |
import tweepy
import json
import cPickle
import zmq
# Authentication details. To obtain these visit dev.twitter.com
# SECURITY NOTE(review): real-looking API credentials are committed here in
# plain text; they should be revoked and loaded from the environment instead.
consumer_key = 'PUpKEozSrCV5x06mc1uNDMgMN'
consumer_secret = 'q0W7N8wDCD41O9pPu8yej42BnJ138Gwpl2wQsB5wvxLqvciKQy'
access_token = '1315274378-RxFpa0iNmDucPaxMCENLaeeky9k4rL9JN0Zjn60'
access_token_secret = 'fjCiFRyXbIMFSGlykJMEhPxNxpj7YnbFreWoeCJBCLQCG'
# Module-level holders (tweet_list is never used below; `filters` is shadowed
# by a local assignment inside StdOutListener.on_data).
tweet_list = ""
filters = ""
# This is the listener, responsible for receiving data
class StdOutListener(tweepy.StreamListener):
    """Stream listener (Python 2) that republishes tweet text over ZMQ and
    accepts new filter terms from a ZMQ control socket."""
    def on_data(self, data):
        # Poll the control channel (module-level `socket1`) for a new
        # filter term without blocking.
        try:
            string = socket1.recv_string(zmq.NOBLOCK)
        except zmq.error.Again:
            print "Nothing to Receive"
            string = ""
        if string:
            print "*"*100
            print string
            # NOTE(review): assigns a local `filters`, not the module-level one.
            filters = [string]
            # Restart the stream with the new term.  stream.filter() blocks,
            # so this re-enters streaming from inside the callback --
            # confirm this recursion is intended.
            stream.disconnect()
            stream.filter(track=filters)
            return True
        # Twitter returns data in JSON format - we need to decode it first
        decoded = json.loads(data)
        # Also, we convert UTF-8 to ASCII ignoring all bad characters sent by users
        print '@%s: %s' % (decoded['user']['screen_name'], decoded['text'].encode('ascii', 'ignore'))
        print ''
        # Publish the sanitized tweet text on the module-level PUB socket.
        socket.send_string(u"%s" % (unicode(decoded['text'].encode('ascii', 'ignore'))))
        print "Tweet Sent"
        print "Waiting for next tweet..."
        return True
    def on_error(self, status):
        # Just report the error code; returning None keeps the stream alive.
        print status
if __name__ == '__main__':
    l = StdOutListener()
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # PUB socket: decoded tweet text is published here for downstream consumers.
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:5556")
    # SUB socket (subscribe-all): delivers new filter terms that restart
    # the stream from inside on_data.
    context1 = zmq.Context()
    socket1 = context1.socket(zmq.SUB)
    socket1.connect("tcp://localhost:5559")
    socket1.setsockopt_string(zmq.SUBSCRIBE, u"")
    print "Beginning Stream:"
    # There are different kinds of streams: public stream, user stream, multi-user streams
    # In this example follow #programming tag
    # For more details refer to https://dev.twitter.com/docs/streaming-apis
    stream = tweepy.Stream(auth, l)
    stream.filter(track=['dude', 'swag', 'bro'])
"VisIt"
] | 2bd98c6d96fb2c12e097b3b3b7546aa01bd0e78d7acf0050ff62b19fe73a2d13 |
# -*- coding: utf-8 -*-
"""This module contains the controller classes of the application."""
# symbols which are imported by "from mse.controllers import *"
__all__ = ['Root']
# standard library imports
import os.path
# import logging
# log = logging.getLogger('mse.controllers')
# third-party imports
from cherrypy import request
from sqlobject import SQLObjectNotFound
from turbogears import controllers, expose, identity, redirect, visit, paginate
from turbogears import widgets, error_handler, validators, validate
from turbogears.toolbox.catwalk import CatWalk
import threading
# project specific imports
from mse.model import VisitIdentity
from async import *
# logging in after successfully registering a new user
def login_user(user):
    """Associate given user with current visit & identity."""
    visit_key = visit.current().key
    try:
        link = VisitIdentity.by_visit_key(visit_key)
    except SQLObjectNotFound:
        link = None
    if not link:
        link = VisitIdentity(visit_key=visit_key, user_id=user.id)
    else:
        # NOTE(review): the create branch above uses `user.id` while this
        # update branch uses `user.user_id`; confirm which attribute the
        # User model actually exposes.
        link.user_id = user.user_id
    # Reload and activate the identity for the current visit.
    user_identity = identity.current_provider.load_identity(visit_key)
    identity.set_current_identity(user_identity)
# Customer Classes
class RegistrationFields(widgets.WidgetsList):
    """Widget list for the sign-up form (rendered by registrationForm)."""
    userName = widgets.TextField(label="Username")
    password = widgets.PasswordField(label="Password")
    confirmPassword = widgets.PasswordField(label="Re-enter Password")
    displayName = widgets.TextField(label="Your Name")
    email = widgets.TextField(label="Email")
    # Options must stay in sync with RegistrationFieldsSchema.securityQuestion.
    securityQuestion = widgets.SingleSelectField(label="Security Question",
        options=["What's your mother's maiden name?",
                 "What's your first pet's name?",
                 "What is the last name of your favorite teacher?"])
    securityAnswer = widgets.TextField(label="Security Answer")
    consent = widgets.CheckBox(label="Informed Consent")
class RegistrationFieldsSchema(validators.Schema):
    """Validation schema for RegistrationFields; password fields must match."""
    userName = validators.UnicodeString(max=16, not_empty=True, strip=True)
    password = validators.UnicodeString(min=5, max=40, not_empty=True, strip=True)
    confirmPassword = validators.UnicodeString(min=5, max=40, not_empty=True, strip=True)
    displayName = validators.UnicodeString(not_empty=True, strip=True)
    email = validators.Email(not_empty=True, strip=True)
    # Must mirror the option list in RegistrationFields.
    securityQuestion = validators.OneOf(["What's your mother's maiden name?",
                                         "What's your first pet's name?",
                                         "What is the last name of your favorite teacher?"])
    securityAnswer = validators.UnicodeString(not_empty=True, strip=True)
    consent = validators.NotEmpty()
    chained_validators = [validators.FieldsMatch('password', 'confirmPassword')]
class SearchFields(widgets.WidgetsList):
    """Widget list for the spectrum search form (rendered by engineForm)."""
    title = widgets.TextField(label="Assignment Title")
    query = widgets.TextArea(label="Input Spectrum")
    maxMass = widgets.TextField(label="Max. Peptide Mass")
    minMass = widgets.TextField(label="Min. Peptide Mass")
    massTolerance = widgets.TextField(label="Mass Tolerance")
    specMode = widgets.SingleSelectField(label="Mode", options=["Positive", "Negative"],
                                         default="Positive")
    # Options must stay in sync with SearchFieldsSchema.database.
    database = widgets.SingleSelectField(label="Databases",
        options=["Ribosomal Proteins in Bacteria: Unreviewed",
                 "Ribosomal Proteins in Bacteria: Reviewed"],
        default="Ribosomal Proteins in Bacteria: Reviewed")
class SearchFieldsSchema(validators.Schema):
    """Validation schema for SearchFields; masses and tolerance are numeric."""
    title = validators.UnicodeString(not_empty=True, strip=True)
    query = validators.UnicodeString(not_empty=True, strip=True)
    maxMass = validators.Number(not_empty=True, strip=True)
    minMass = validators.Number(not_empty=True, strip=True)
    massTolerance = validators.Number(not_empty=True, strip=True)
    specMode = validators.OneOf(["Positive", "Negative"])
    # Must mirror the option list in SearchFields.
    database = validators.OneOf(["Ribosomal Proteins in Bacteria: Unreviewed",
                                 "Ribosomal Proteins in Bacteria: Reviewed"])
# Sign-up form: posts to /signupsubmit, validated by RegistrationFieldsSchema.
registrationForm = widgets.TableForm(
    fields=RegistrationFields(),
    validator=RegistrationFieldsSchema(),
    action="signupsubmit",
    submit_text="Submit"
)
# Spectrum search form: posts to /searchsubmit, validated by SearchFieldsSchema.
engineForm = widgets.TableForm(
    fields=SearchFields(),
    validator=SearchFieldsSchema(),
    action="searchsubmit",
    submit_text="Submit"
)
class Root(controllers.RootController):
    """Top-level TurboGears controller for the MSE web application."""
    # CatWalk model browser, restricted to members of the 'admin' group.
    # NOTE(review): `model` is not imported by name in the visible imports;
    # presumably provided by `from async import *` -- verify.
    catwalk = CatWalk(model)
    catwalk = identity.SecureObject(catwalk, identity.in_group('admin'))
    @expose('mse.templates.index')
    def index(self):
        """Render the landing page."""
        siteTitle = "Welcome to MSE"
        return dict(siteTitle=siteTitle)
    @expose('mse.templates.about')
    def about(self):
        """Render the introduction page from static abstract/reference files."""
        siteTitle = "Introduction"
        directory = os.path.dirname(__file__)
        # Abstract paragraphs are separated by blank lines, references by newlines.
        with open(directory+'/static/text/abstract.txt', 'r') as AbstractContent:
            abstract = AbstractContent.read().strip().split("\n\n")
        with open(directory+'/static/text/references.txt', 'r') as ReferenceContent:
            ref = ReferenceContent.read().strip().split("\n")
        return dict(siteTitle=siteTitle, abstract=abstract, ref=ref)
    @expose('mse.templates.contact')
    def contact(self):
        """Render the contact page with a hard-coded author list."""
        authors = [
            {"name": "Mingda Jin",
             "phone": "(123)456-789",
             "email": "mj568@georgetown.edu"
            },
            {"name": "Nathan J Edwards",
             "phone": "(123)456-789",
             "email": "nje5@georgetown.edu"
            }
        ]
        siteTitle = "Contact"
        return dict(contacts=authors, siteTitle=siteTitle)
    @expose('mse.templates.login')
    def login(self, forward_url=None, *args, **kw):
        """Show the login form or forward user to previously requested page."""
        if forward_url:
            if isinstance(forward_url, list):
                forward_url = forward_url.pop(0)
            else:
                del request.params['forward_url']
        new_visit = visit.current()
        if new_visit:
            new_visit = new_visit.is_new
        # Successful login on an established visit: redirect onward.
        if (not new_visit and not identity.current.anonymous
                and identity.was_login_attempted()
                and not identity.get_identity_errors()):
            # Redirection
            redirect(forward_url or '/searchlist', kw)
        if identity.was_login_attempted():
            if new_visit:
                msg = _(u"Cannot log in because your browser "
                        "does not support session cookies.")
            else:
                msg = _(u"The credentials you supplied were not correct or "
                        "did not grant access to this resource.")
        elif identity.get_identity_errors():
            msg = _(u"You must provide your credentials before accessing "
                    "this resource.")
        else:
            #msg = _(u"Please log in.")
            msg = _(u"")
            if not forward_url:
                forward_url = request.headers.get('Referer', '/')
        # we do not set the response status here anymore since it
        # is now handled in the identity exception.
        return dict(logging_in=True, message=msg,
                    forward_url=forward_url, previous_url=request.path_info,
                    original_parameters=request.params)
    @expose()
    @identity.require(identity.not_anonymous())
    def logout(self):
        """Log out the current identity and redirect to start page."""
        identity.current.logout()
        redirect('/')
    @expose('mse.templates.signupForm')
    def signupform(self):
        """Render the registration form."""
        siteTitle = "Sign up"
        return dict(siteTitle=siteTitle, form=registrationForm)
    @expose()
    @validate(form=registrationForm)
    @error_handler(signupform)
    def signupsubmit(self, **kw):
        """Create the user, put it in the 'admin' group, and log it in."""
        # Create a user
        model.User(user_name=kw['userName'], password=kw['password'], email_address=kw['email'],
                   display_name=kw['displayName'], security_question=kw['securityQuestion'],
                   security_answer=kw['securityAnswer'])
        # Create a Group
        #model.Group(group_name=u'guest', display_name=u'Guest Users')
        #model.Group(group_name=u'admin', display_name=u'Admin Users')
        # Assign created user to a group
        # NOTE(review): every self-registered user is added to 'admin' --
        # confirm this is intended and not a leftover from development.
        group = model.Group.by_group_name(u'admin')
        model.User.by_user_name(kw['userName']).addGroup(group)
        # Login automatically after registration
        user = model.User.by_user_name(kw['userName'])
        login_user(user)
        redirect('/signupconfirmation')
    @expose('mse.templates.signupConfirmation')
    def signupconfirmation(self):
        """Render the post-registration welcome page."""
        siteTitle = "Welcome"
        return dict(siteTitle=siteTitle)
    @expose('mse.templates.searchForm')
    @identity.require(identity.not_anonymous())
    def searchform(self, **kw):
        """Render the search form with the user's five most recent searches."""
        siteTitle = "Start Your Search"
        u = identity.current.user
        previousSearches = u.searches[::-1][:5]
        return dict(siteTitle=siteTitle, form=engineForm, values=kw, searchHistory=previousSearches)
    @expose()
    @identity.require(identity.not_anonymous())
    @validate(form=engineForm)
    @error_handler(searchform)
    def searchsubmit(self, **kw):
        """Persist the search and kick off identification in the background."""
        u = identity.current.user
        model.SearchList(title=kw['title'], min_mass=kw['minMass'], max_mass=kw['maxMass'],
                         query=kw['query'], mass_tolerance=kw['massTolerance'], spec_mode=kw['specMode'],
                         database=kw['database'], user=u)
        # Start the search thread immediately
        # (MicroorganismIdentification comes from `from async import *`).
        t = threading.Thread(target=MicroorganismIdentification)
        t.daemon = True
        t.start()
        redirect('/searchlist')
    @expose('mse.templates.searchList')
    @identity.require(identity.not_anonymous())
    @paginate('searchData', default_order="-created")
    def searchlist(self):
        """Render the paginated list of the current user's searches."""
        u = identity.current.user
        userSearch = u.searches
        siteTitle = "Your Searches"
        return dict(siteTitle=siteTitle, searchData=userSearch)
    @expose()
    @identity.require(identity.not_anonymous())
    def searchdelete(self, **kw):
        """Stub: deletion is commented out; currently only redirects."""
        #print searchID
        #print "@" * 100
        #model.SearchList.deleteBy(searchID)
        redirect('/searchlist')
@expose('mse.templates.searchResult')
@identity.require(identity.not_anonymous())
@paginate('resultData', default_order="p_value")
def searchresult(self, searchID):
s = model.SearchList.get(searchID)
r = s.results
return dict(resultData=r) | jinmingda/MicroorganismSearchEngine | mse/controllers.py | Python | mit | 10,790 | [
"VisIt"
] | d119bb7db289c137eeee9a90632f14cdee783d326139d5858517465746f71044 |
# -*- coding:utf-8 -*-
#
# Compute the normal and the gaussian curvature of a mesh
#
# Based on :
# Discrete Differential-Geometry Operators for Triangulated 2-Manifolds
# Mark Meyer, Mathieu Desbrun, Peter Schröder, Alan H. Barr
# VisMath '02, Berlin (Germany)
# External dependencies
import math
import numpy as np
# Compute the normal curvature vectors of a given mesh
def GetNormalCurvatureReference( mesh ) :
    """Reference (per-face loop) implementation of the cotangent-weighted
    normal-curvature vector of each vertex (Meyer et al. 2002).

    Returns an array shaped like mesh.vertices; border vertices are zeroed
    because the operator is only defined on interior 1-rings.
    """
    # Initialisation
    normal_curvature = np.zeros( mesh.vertices.shape )
    mixed_area = GetMixedArea( mesh )
    # Loop through the faces
    for a, b, c in mesh.faces :
        # Get the vertices
        va, vb, vc = mesh.vertices[[a, b, c]]
        # Compute cotangent of each angle
        cota = Cotangent3( va, vb, vc )
        cotb = Cotangent3( vb, va, vc )
        cotc = Cotangent3( vc, va, vb )
        # Add vectors to vertex normal curvature: each edge contributes the
        # cotangent of the angle opposite to it.
        normal_curvature[a] += (va-vc) * cotb + (va-vb) * cotc
        normal_curvature[b] += (vb-vc) * cota + (vb-va) * cotc
        normal_curvature[c] += (vc-va) * cotb + (vc-vb) * cota
    # Weight the normal curvature vectors by the mixed area
    normal_curvature /= 2.0 * mixed_area.reshape( (len(mesh.vertices),1) )
    # Remove border vertices
    normal_curvature[ mesh.GetBorderVertices() ] = 0.0
    # Return the normal curvature vector array
    return normal_curvature
# Compute the mixed area of every vertex of a given mesh
def GetMixedArea( mesh ) :
    """Return the mixed area of every vertex (Meyer et al. 2002).

    For non-obtuse triangles the Voronoi area is used; for obtuse triangles
    the face area is split 1/2 to the obtuse vertex and 1/4 to the others.
    """
    # Initialisation
    mixed_area = np.zeros( len(mesh.vertices) )
    # DEAD CODE REMOVED: the original computed a vectorized per-face area
    # here ("tris" view + np.cross), then unconditionally recomputed
    # face_area inside the loop below before ever reading it.
    # Loop through the faces
    for a, b, c in mesh.faces :
        # Get the vertices
        va, vb, vc = mesh.vertices[[a, b, c]]
        # Compute cotangent of each angle
        cota = Cotangent3( va, vb, vc )
        cotb = Cotangent3( vb, va, vc )
        cotc = Cotangent3( vc, va, vb )
        # Compute triangle area
        face_area = np.sqrt((np.cross((vb-va),(vc-va))**2).sum()) / 2.0
        # Obtuse triangle cases (Voronoi inappropriate): a negative dot
        # product means the angle at that vertex is obtuse.
        if np.dot( vb-va, vc-va ) < 0 :
            mixed_area[a] += face_area / 2.0
            mixed_area[b] += face_area / 4.0
            mixed_area[c] += face_area / 4.0
        elif np.dot( va-vb, vc-vb ) < 0 :
            mixed_area[a] += face_area / 4.0
            mixed_area[b] += face_area / 2.0
            mixed_area[c] += face_area / 4.0
        elif np.dot( va-vc, vb-vc ) < 0 :
            mixed_area[a] += face_area / 4.0
            mixed_area[b] += face_area / 4.0
            mixed_area[c] += face_area / 2.0
        # Non-obtuse triangle case (Voronoi area)
        else :
            u = ( (va - vb) ** 2 ).sum()
            v = ( (va - vc) ** 2 ).sum()
            w = ( (vb - vc) ** 2 ).sum()
            mixed_area[a] += ( u * cotc + v * cotb ) / 8.0
            mixed_area[b] += ( u * cotc + w * cota ) / 8.0
            mixed_area[c] += ( v * cotb + w * cota ) / 8.0
    # Return the mixed area of every vertex
    return mixed_area
# Compute the normal curvature vectors of a given mesh
def GetNormalCurvature( mesh ) :
    """Vectorized computation of the cotangent-weighted normal-curvature
    vector of each vertex; equivalent to GetNormalCurvatureReference but
    with the per-face trigonometry done in batch."""
    # Create an indexed view of the triangles
    tris = mesh.vertices[ mesh.faces ]
    # Compute the edge vectors of the triangles
    u = tris[::,1] - tris[::,0]
    v = tris[::,2] - tris[::,1]
    w = tris[::,0] - tris[::,2]
    # Compute the cotangent of the triangle angles
    cotangent = np.array( [ Cotangent( u, -w ), Cotangent( v, -u ), Cotangent( w, -v ) ] ).T
    # Compute triangle area
    face_area = np.sqrt( (np.cross( u, -w ) ** 2).sum(axis=1) ) / 2.0
    # Tell if there is an obtuse angle in the triangles
    obtuse_angle = ( np.array( [ (-u*w).sum(axis=1), (-u*v).sum(axis=1), (-w*v).sum(axis=1) ] ) < 0 ).T
    # Compute the voronoi area of the vertices in each face
    voronoi_area = np.array( [ cotangent[::,2] * (u**2).sum(axis=1) + cotangent[::,1] * (w**2).sum(axis=1),
            cotangent[::,0] * (v**2).sum(axis=1) + cotangent[::,2] * (u**2).sum(axis=1),
            cotangent[::,0] * (v**2).sum(axis=1) + cotangent[::,1] * (w**2).sum(axis=1) ] ).T / 8.0
    # Compute the mixed area of each vertex
    mixed_area = np.zeros( mesh.vertex_number )
    for i, (a, b, c) in enumerate( mesh.faces ) :
        # Mixed area - Non-obtuse triangle case (Voronoi area)
        if (obtuse_angle[i] == False).all() :
            mixed_area[a] += voronoi_area[i,0]
            mixed_area[b] += voronoi_area[i,1]
            mixed_area[c] += voronoi_area[i,2]
        # Mixed area - Obtuse triangle cases (Voronoi inappropriate):
        # half the face area to the obtuse corner, a quarter to the others.
        elif obtuse_angle[i,0] :
            mixed_area[a] += face_area[i] / 2.0
            mixed_area[b] += face_area[i] / 4.0
            mixed_area[c] += face_area[i] / 4.0
        elif obtuse_angle[i,1] :
            mixed_area[a] += face_area[i] / 4.0
            mixed_area[b] += face_area[i] / 2.0
            mixed_area[c] += face_area[i] / 4.0
        else :
            mixed_area[a] += face_area[i] / 4.0
            mixed_area[b] += face_area[i] / 4.0
            mixed_area[c] += face_area[i] / 2.0
    # Compute the curvature part of the vertices in each face
    vertex_curvature = np.array( [ w * cotangent[::,1].reshape(-1,1) - u * cotangent[::,2].reshape(-1,1),
            u * cotangent[::,2].reshape(-1,1) - v * cotangent[::,0].reshape(-1,1),
            v * cotangent[::,0].reshape(-1,1) - w * cotangent[::,1].reshape(-1,1) ] )
    # Compute the normal curvature vector of each vertex
    normal_curvature = np.zeros( mesh.vertices.shape )
    for i, (a, b, c) in enumerate( mesh.faces ) :
        normal_curvature[a] += vertex_curvature[0,i]
        normal_curvature[b] += vertex_curvature[1,i]
        normal_curvature[c] += vertex_curvature[2,i]
    # Weight the normal curvature vectors by the mixed area
    normal_curvature /= 2.0 * mixed_area.reshape( -1, 1 )
    # Remove border vertices
    normal_curvature[ mesh.GetBorderVertices() ] = 0.0
    # Return the normal curvature vector array
    return normal_curvature
# Compute the per-vertex gaussian curvature (reference implementation)
def GetGaussianCurvatureReference( mesh ) :
	# Angle-deficit formula : K(v) = ( 2*pi - sum of incident corner angles ) / mixed area
	gaussian_curvature = np.zeros( mesh.vertex_number )
	# Loop through the vertices
	for v in range( mesh.vertex_number ) :
		mixed_area = 0.0
		angle_sum = 0.0
		# Accumulate area and angle over the 1-ring neighborhood faces
		for f in mesh.neighbor_faces[v] :
			face = mesh.faces[f]
			# Rotate the face indices so that the current vertex comes first
			if face[0] == v : a, b = face[1], face[2]
			elif face[1] == v : a, b = face[2], face[0]
			else : a, b = face[0], face[1]
			p, pa, pb = mesh.vertices[v], mesh.vertices[a], mesh.vertices[b]
			mixed_area += VoronoiRegionArea( p, pa, pb )
			angle_sum += AngleFromCotan3( p, pa, pb )
		gaussian_curvature[v] = ( 2.0 * math.pi - angle_sum ) / mixed_area
	# Border vertices have no meaningful curvature estimate
	gaussian_curvature[ mesh.GetBorderVertices() ] = 0.0
	return gaussian_curvature
# Cotangent between two arrays of vectors (row-wise)
def Cotangent( u, v ) :
	# cot(theta) = (u.v) / |u x v| ; the cross product norm comes from
	# the identity |u x v|^2 = |u|^2 |v|^2 - (u.v)^2
	dot = ( u * v ).sum(axis=1)
	cross_norm_sq = ( u**2 ).sum(axis=1) * ( v**2 ).sum(axis=1) - dot ** 2
	return dot / np.sqrt( cross_norm_sq )
# Cotangent of the corner angle at vo in the triangle (vo, va, vb)
def Cotangent3( vo, va, vb ) :
	edge_a = va - vo
	edge_b = vb - vo
	dot_ab = np.dot( edge_a, edge_b )
	# |ea x eb|^2 = |ea|^2 |eb|^2 - (ea.eb)^2
	cross_norm_sq = np.dot( edge_a, edge_a ) * np.dot( edge_b, edge_b ) - dot_ab * dot_ab
	return dot_ab / math.sqrt( cross_norm_sq )
# Unsigned angle between two vectors, via atan2( |u x v|, u.v )
def AngleFromCotan( u, v ) :
	dot_uv = np.dot( u, v )
	cross_norm_sq = ( u**2 ).sum() * ( v**2 ).sum() - dot_uv * dot_uv
	return abs( math.atan2( math.sqrt( cross_norm_sq ), dot_uv ) )
# Unsigned corner angle at vertex vo of the triangle (vo, va, vb)
def AngleFromCotan3( vo, va, vb ) :
	edge_a = va - vo
	edge_b = vb - vo
	dot_ab = np.dot( edge_a, edge_b )
	# |ea x eb|^2 = |ea|^2 |eb|^2 - (ea.eb)^2
	cross_norm_sq = np.dot( edge_a, edge_a ) * np.dot( edge_b, edge_b ) - dot_ab * dot_ab
	return abs( math.atan2( math.sqrt( cross_norm_sq ), dot_ab ) )
# Mixed (obtuse-safe Voronoi) region area of vertex vo in the triangle (vo, va, vb)
def VoronoiRegionArea( vo, va, vb ) :
	# Triangle area from the cross product norm
	cross = np.cross( va - vo, vb - vo )
	face_area = 0.5 * math.sqrt( ( cross**2 ).sum() )
	# Degenerate triangle contributes nothing
	if face_area == 0.0 : return 0.0
	# Obtuse at vo : vo receives half of the triangle area
	if ObtuseAngle( vo, va, vb ) :
		return 0.5 * face_area
	# Obtuse at another corner : vo receives a quarter
	if ObtuseAngle( va, vb, vo ) or ObtuseAngle( vb, vo, va ) :
		return 0.25 * face_area
	# Non-obtuse triangle : true Voronoi area from the cotangent weights
	return 0.125 * ( Cotangent3( va, vo, vb ) * ( ( vo - vb )**2 ).sum() + Cotangent3( vb, vo, va ) * ( ( vo - va )**2 ).sum() )
# True when the corner angle at vertex vo of the triangle (vo, va, vb) is obtuse
def ObtuseAngle( vo, va, vb ) :
	# The angle exceeds 90 degrees exactly when the edge dot product is negative
	edge_a = va - vo
	edge_b = vb - vo
	return np.dot( edge_a, edge_b ) < 0
| microy/PyMeshToolkit | MeshToolkit/Core/Curvature.py | Python | mit | 8,082 | [
"Gaussian"
] | e95a779d0a3747f8ed1b9e5f028fbae51297037d695aa287a6ceebd16261789b |
#!/usr/bin/env python
#JSON {"lot": "UKS/6-31G*",
#JSON "scf": "ODASCFSolver",
#JSON "linalg": "CholeskyLinalgFactory",
#JSON "difficulty": 3,
#JSON "description": "Basic UKS DFT example with LDA exhange-correlation functional (Dirac+VWN)"}
from horton import *
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/methyl.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g*')
# Create a linalg factory
lf = DenseLinalgFactory(obasis.nbasis)
# Compute Gaussian integrals
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
# Define a numerical integration grid needed for the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)
# Create alpha orbitals
exp_alpha = lf.create_expansion()
exp_beta = lf.create_expansion()
# Initial guess
guess_core_hamiltonian(olp, kin, na, exp_alpha, exp_beta)
# Construct the unrestricted Kohn-Sham (LDA) effective Hamiltonian:
# Dirac exchange + VWN correlation on the numerical grid.
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
    UTwoIndexTerm(kin, 'kin'),
    UDirectTerm(er, 'hartree'),
    UGridGroup(obasis, grid, [
        ULibXCLDA('x'),
        ULibXCLDA('c_vwn'),
    ]),
    UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons, 4 beta electrons)
occ_model = AufbauOccModel(5, 4)
# Converge WFN with Optimal damping algorithm (ODA) SCF
# - Construct the initial density matrix (needed for ODA).
occ_model.assign(exp_alpha, exp_beta)
dm_alpha = exp_alpha.to_dm()
dm_beta = exp_beta.to_dm()
# - SCF solver
scf_solver = ODASCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, dm_alpha, dm_beta)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = lf.create_two_index()
fock_beta = lf.create_two_index()
ham.reset(dm_alpha, dm_beta)
ham.compute_energy()
ham.compute_fock(fock_alpha, fock_beta)
exp_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
exp_beta.from_fock_and_dm(fock_beta, dm_beta, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the ODA algorithm can only really construct an
# optimized density matrix and no orbitals.
mol.title = 'UKS computation on methyl'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.exp_alpha = exp_alpha
mol.exp_beta = exp_beta
mol.dm_alpha = dm_alpha
mol.dm_beta = dm_beta
# useful for visualization:
mol.to_file('methyl.molden')
# useful for post-processing (results stored in double precision):
mol.to_file('methyl.h5')
| eustislab/horton | data/examples/hf_dft/uks_methyl_lda.py | Python | gpl-3.0 | 2,833 | [
"DIRAC",
"Gaussian"
] | 75590f6323cf066ea4efafe86698f69b6f594a21176b754db054e338a6426736 |
#### PATTERN | WEB #################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# Python API interface for various web services (Google, Twitter, Wikipedia, ...)
# smgllib.py is removed from Python 3, a warning is issued in Python 2.6+. Ignore for now.
import warnings; warnings.filterwarnings(action='ignore', category=DeprecationWarning, module="sgmllib")
import threading
import time
import os
import socket, urlparse, urllib, urllib2
import base64
import htmlentitydefs
import sgmllib
import re
import xml.dom.minidom
import StringIO
import bisect
import new
import api
import feed
import oauth
import json
import locale
from feed import feedparser
from soup import BeautifulSoup
try:
# Import persistent Cache.
# If this module is used separately, a dict is used (i.e. for this Python session only).
from cache import Cache, cache, TMP
except:
cache = {}
try:
from imap import Mail, MailFolder, Message, GMAIL
from imap import MailError, MailServiceError, MailLoginError, MailNotLoggedIn
from imap import FROM, SUBJECT, DATE, BODY, ATTACHMENTS
except:
pass
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
#### UNICODE #######################################################################################
def decode_utf8(string):
    """ Returns the given string decoded to a unicode string (if possible).
    """
    if not isinstance(string, str):
        return unicode(string)
    # Try strict UTF-8 first, then Windows-1252, then UTF-8 ignoring errors.
    for codec_args in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
        try:
            return string.decode(*codec_args)
        except:
            pass
    return string
def encode_utf8(string):
    """ Returns the given string encoded as a UTF-8 byte string (if possible).
    """
    if not isinstance(string, unicode):
        return str(string)
    try:
        return string.encode("utf-8")
    except:
        return string
# Shorthand aliases used throughout this module:
# u() decodes byte strings to unicode, s() encodes unicode to byte strings.
u = decode_utf8
s = encode_utf8
# For clearer source code:
bytestring = s
#### ASYNCHRONOUS REQUEST ##########################################################################
class AsynchronousRequest:

    def __init__(self, function, *args, **kwargs):
        """ Executes the function in the background.
            AsynchronousRequest.done is False as long as it is busy, but the program will not halt in the meantime.
            AsynchronousRequest.value contains the function's return value once done.
            AsynchronousRequest.error contains the Exception raised by an erronous function.
            For example, this is useful for running live web requests while keeping an animation running.
            For good reasons, there is no way to interrupt a background process (i.e. Python thread).
            You are responsible for ensuring that the given function doesn't hang.
        """
        self._response = None # The return value of the given function.
        self._error = None # The exception (if any) raised by the function.
        self._time = time.time()
        self._function = function
        self._thread = threading.Thread(target=self._fetch, args=(function,)+args, kwargs=kwargs)
        self._thread.start()

    def _fetch(self, function, *args, **kwargs):
        """ Executes the function and sets AsynchronousRequest.response.
        """
        try:
            self._response = function(*args, **kwargs)
        # Modernized from the Python 2-only "except Exception, e" form
        # ("as" works on Python 2.6+ and Python 3).
        except Exception as e:
            self._error = e

    def now(self):
        """ Waits for the function to finish and yields its return value.
        """
        self._thread.join()
        return self._response

    @property
    def elapsed(self):
        # Seconds since the request was started.
        return time.time() - self._time

    @property
    def done(self):
        # Thread.is_alive() replaces the deprecated isAlive() (removed in
        # Python 3.9); both spellings exist since Python 2.6.
        return not self._thread.is_alive()

    @property
    def value(self):
        return self._response

    @property
    def error(self):
        return self._error

    def __repr__(self):
        return "AsynchronousRequest(function='%s')" % self._function.__name__
def asynchronous(function, *args, **kwargs):
    """ Runs the given function in the background and returns
        an AsynchronousRequest handle to poll for its result.
    """
    request = AsynchronousRequest(function, *args, **kwargs)
    return request

send = asynchronous
#### URL ###########################################################################################
# User agent and referrer.
# Used to identify the application accessing the web.
USER_AGENT = "Pattern/2.3 +http://www.clips.ua.ac.be/pages/pattern"
REFERRER = "http://www.clips.ua.ac.be/pages/pattern"
# Mozilla user agent.
# Websites can include code to block out any application except browsers.
MOZILLA = "Mozilla/5.0"
# HTTP request method.
GET = "get" # Data is encoded in the URL.
POST = "post" # Data is encoded in the message body.
# URL parts.
# protocol://username:password@domain:port/path/page?query_string#anchor
PROTOCOL, USERNAME, PASSWORD, DOMAIN, PORT, PATH, PAGE, QUERY, ANCHOR = \
    "protocol", "username", "password", "domain", "port", "path", "page", "query", "anchor"
# MIME type.
# Lists of common MIME types, for use with URL.mimetype (e.g. URL.mimetype in MIMETYPE_IMAGE).
MIMETYPE_WEBPAGE = ["text/html"]
MIMETYPE_STYLESHEET = ["text/css"]
MIMETYPE_PLAINTEXT = ["text/plain"]
MIMETYPE_PDF = ["application/pdf"]
MIMETYPE_NEWSFEED = ["application/rss+xml", "application/atom+xml"]
MIMETYPE_IMAGE = ["image/gif", "image/jpeg", "image/png", "image/tiff"]
MIMETYPE_AUDIO = ["audio/mpeg", "audio/mp4", "audio/x-aiff", "audio/x-wav"]
MIMETYPE_VIDEO = ["video/mpeg", "video/mp4", "video/quicktime"]
MIMETYPE_ARCHIVE = ["application/x-stuffit", "application/x-tar", "application/zip"]
MIMETYPE_SCRIPT = ["application/javascript", "application/ecmascript"]
def extension(filename):
    """ Returns the file extension in the given filename: "cat.jpg" => ".jpg".
    """
    root, ext = os.path.splitext(filename)
    return ext
def urldecode(query):
    """ Inverse operation of urllib.urlencode.
        Returns a dictionary of (name, value)-items from a URL query string.
    """
    def _format(s):
        # Missing values arrive as the string "None" (see padding below,
        # after round-tripping through bytestring()) and map back to None.
        if s == "None":
            return None
        if s.isdigit():
            return int(s)
        try: return float(s)
        except:
            return s
    # Split "a=1&b=2" into [("a", "1"), ("b", "2")]; a key without "=" gets value None.
    query = [(kv.split("=")+[None])[:2] for kv in query.lstrip("?").split("&")]
    # Unquote the percent-escapes as byte strings, then decode to unicode.
    query = [(urllib.unquote_plus(bytestring(k)), urllib.unquote_plus(bytestring(v))) for k, v in query]
    query = [(u(k), u(v)) for k, v in query]
    # Coerce numeric strings to int/float; note that falsy results
    # (0, 0.0, "") also collapse to None because of the "or" here.
    query = [(k, _format(v) or None) for k, v in query]
    query = dict([(k,v) for k, v in query if k != ""])
    return query

url_decode = urldecode
def proxy(host, protocol="https"):
    """ Returns a (host, protocol) tuple for the URL.open() proxy parameter.
        - host: host address of the proxy server.
    """
    return host, protocol
class URLError(Exception):
    """ The URL contains errors (e.g. a missing t in htp://). """

class URLTimeout(URLError):
    """ The URL takes too long to load. """

class HTTPError(URLError):
    """ The URL causes an error on the contacted server. """

class HTTP301Redirect(HTTPError):
    """ Too many redirects.
        The site may be trying to set a cookie and waiting for you to return it,
        or taking other measures to discern a browser from a script.
        For specific purposes you should build your own urllib2.HTTPRedirectHandler
        and pass it to urllib2.build_opener() in URL.open().
    """

class HTTP400BadRequest(HTTPError):
    """ The URL contains an invalid request. """

class HTTP401Authentication(HTTPError):
    """ The URL requires a login and password. """

class HTTP403Forbidden(HTTPError):
    """ The URL is not accessible (user-agent?). """

class HTTP404NotFound(HTTPError):
    """ The URL doesn't exist on the internet. """

class HTTP420Error(HTTPError):
    """ Used by Twitter for rate limiting. """

class HTTP500InternalServerError(HTTPError):
    """ Generic server error. """
class URL:

    def __init__(self, string=u"", method=GET, query={}):
        """ URL object with the individual parts available as attributes:
            For protocol://username:password@domain:port/path/page?query_string#anchor:
            - URL.protocol: http, https, ftp, ...
            - URL.username: username for restricted domains.
            - URL.password: password for restricted domains.
            - URL.domain  : the domain name, e.g. nodebox.net.
            - URL.port    : the server port to connect to.
            - URL.path    : the server path of folders, as a list, e.g. ['news', '2010']
            - URL.page    : the page name, e.g. page.html.
            - URL.query   : the query string as a dictionary of (name, value)-items.
            - URL.anchor  : the page anchor.
            If method is POST, the query string is sent with HTTP POST.
        """
        self.__dict__["method"] = method # Use __dict__ directly since __setattr__ is overridden.
        self.__dict__["_string"] = u(string)
        self.__dict__["_parts"] = None
        self.__dict__["_headers"] = None
        self.__dict__["_redirect"] = None
        if isinstance(string, URL):
            self.__dict__["method"] = string.method
            self.query.update(string.query)
        if len(query) > 0:
            # Requires that we parse the string first (see URL.__setattr__).
            self.query.update(query)

    def _parse(self):
        """ Parses all the parts of the URL string to a dictionary.
            URL format: protocal://username:password@domain:port/path/page?querystring#anchor
            For example: http://user:pass@example.com:992/animal/bird?species=seagull&q#wings
            This is a cached method that is only invoked when necessary, and only once.
        """
        p = urlparse.urlsplit(self._string)
        P = {PROTOCOL: p[0],            # http
             USERNAME: u"",             # user
             PASSWORD: u"",             # pass
               DOMAIN: p[1],            # example.com
                 PORT: u"",             # 992
                 PATH: p[2],            # [animal]
                 PAGE: u"",             # bird
                QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
               ANCHOR: p[4]             # wings
        }
        # Split the username and password from the domain.
        if "@" in P[DOMAIN]:
            P[USERNAME], \
            P[PASSWORD] = (p[1].split("@")[0].split(":")+[u""])[:2]
            P[DOMAIN] = p[1].split("@")[1]
        # Split the port number from the domain.
        if ":" in P[DOMAIN]:
            P[DOMAIN], \
            P[PORT] = P[DOMAIN].split(":")
            P[PORT] = int(P[PORT])
        # Split the base page from the path.
        if "/" in P[PATH]:
            P[PAGE] = p[2].split("/")[-1]
            P[PATH] = p[2][:len(p[2])-len(P[PAGE])].strip("/").split("/")
            P[PATH] = filter(lambda v: v != "", P[PATH])
        else:
            P[PAGE] = p[2].strip("/")
            P[PATH] = []
        self.__dict__["_parts"] = P

    # URL.string yields unicode(URL) by joining the different parts,
    # if the URL parts have been modified.
    def _get_string(self): return unicode(self)
    def _set_string(self, v):
        self.__dict__["_string"] = u(v)
        self.__dict__["_parts"] = None

    string = property(_get_string, _set_string)

    @property
    def parts(self):
        """ Yields a dictionary with the URL parts.
        """
        if not self._parts: self._parse()
        return self._parts

    @property
    def querystring(self):
        """ Yields the URL querystring: "www.example.com?page=1" => "page=1"
        """
        s = self.parts[QUERY].items()
        s = dict((bytestring(k), bytestring(v if v is not None else "")) for k, v in s)
        s = urllib.urlencode(s)
        return s

    def __getattr__(self, k):
        if k in self.__dict__ : return self.__dict__[k]
        if k in self.parts    : return self.__dict__["_parts"][k]
        # Modernized from the Python 2-only "raise AttributeError, msg" form.
        raise AttributeError("'URL' object has no attribute '%s'" % k)

    def __setattr__(self, k, v):
        if k in self.__dict__ : self.__dict__[k] = u(v); return
        if k == "string"      : self._set_string(v); return
        if k == "query"       : self.parts[k] = v; return
        if k in self.parts    : self.__dict__["_parts"][k] = u(v); return
        raise AttributeError("'URL' object has no attribute '%s'" % k)

    def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
        """ Returns a connection to the url from which data can be retrieved with connection.read().
            When the timeout amount of seconds is exceeded, raises a URLTimeout.
            When an error occurs, raises a URLError (e.g. HTTP404NotFound).
        """
        url = self.string
        # Use basic urllib.urlopen() instead of urllib2.urlopen() for local files.
        if os.path.exists(url):
            return urllib.urlopen(url)
        # Get the query string as a separate parameter if method=POST.
        post = self.method == POST and self.querystring or None
        socket.setdefaulttimeout(timeout)
        if proxy:
            proxy = urllib2.ProxyHandler({proxy[1]: proxy[0]})
            proxy = urllib2.build_opener(proxy, urllib2.HTTPHandler)
            urllib2.install_opener(proxy)
        try:
            request = urllib2.Request(bytestring(url), post, {
                "User-Agent": user_agent,
                  "Referer": referrer
            })
            # Basic authentication is established with authentication=(username, password).
            if authentication is not None:
                request.add_header("Authorization", "Basic %s" %
                    base64.encodestring('%s:%s' % authentication))
            return urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            if e.code == 301: raise HTTP301Redirect
            if e.code == 400: raise HTTP400BadRequest
            if e.code == 401: raise HTTP401Authentication
            if e.code == 403: raise HTTP403Forbidden
            if e.code == 404: raise HTTP404NotFound
            if e.code == 420: raise HTTP420Error
            if e.code == 500: raise HTTP500InternalServerError
            raise HTTPError
        except socket.timeout:
            raise URLTimeout
        except urllib2.URLError as e:
            if e.reason == "timed out" \
            or e.reason[0] in (36, "timed out"):
                raise URLTimeout
            raise URLError(e.reason)
        except ValueError as e:
            raise URLError(e)

    def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
        """ Downloads the content at the given URL (by default it will be cached locally).
            Unless unicode=False, the content is returned as a unicode string.
        """
        # Filter OAuth parameters from cache id (they will be unique for each request).
        if self._parts is None and self.method == GET and "oauth_" not in self._string:
            id = self._string
        else:
            id = repr(self.parts)
            id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
        # Keep a separate cache of unicode and raw download for same URL.
        if unicode is True:
            id = "u" + id
        if cached and id in cache:
            if isinstance(cache, dict): # Not a Cache object.
                return cache[id]
            if unicode is True:
                return cache[id]
            if unicode is False:
                return cache.get(id, unicode=False)
        t = time.time()
        # Open a connection with the given settings, read it and (by default) cache the data.
        data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
        if unicode is True:
            data = u(data)
        if cached:
            cache[id] = data
        if throttle:
            time.sleep(max(throttle-(time.time()-t), 0))
        return data

    def read(self, *args):
        return self.open().read(*args)

    @property
    def exists(self, timeout=10):
        """ Yields False if the URL generates a HTTP404NotFound error.
        """
        # Note: accessed as a property, so the timeout parameter always keeps its default.
        try: self.open(timeout)
        except HTTP404NotFound:
            return False
        except (HTTPError, URLTimeout):
            # BUG FIX: the original "except HTTPError, URLTimeoutError:" used the
            # Python 2 comma syntax, which caught only HTTPError and bound it to the
            # (undefined) name URLTimeoutError. A URLTimeout therefore fell through
            # to the URLError clause below and wrongly reported the URL as missing.
            # A slow-but-reachable URL now reports True, as intended.
            return True
        except URLError:
            return False
        except:
            return True
        return True

    @property
    def mimetype(self, timeout=10):
        """ Yields the MIME-type of the document at the URL, or None.
            MIME is more reliable than simply checking the document extension.
            You can then do: URL.mimetype in MIMETYPE_IMAGE.
        """
        try:
            return self.headers["content-type"].split(";")[0]
        except KeyError:
            return None

    @property
    def headers(self, timeout=10):
        """ Yields a dictionary with the HTTP response headers.
        """
        if self.__dict__["_headers"] is None:
            try:
                h = dict(self.open(timeout).info())
            except URLError:
                h = {}
            self.__dict__["_headers"] = h
        return self.__dict__["_headers"]

    @property
    def redirect(self, timeout=10):
        """ Yields the redirected URL, or None.
        """
        if self.__dict__["_redirect"] is None:
            try:
                r = self.open(timeout).geturl()
            except URLError:
                r = None
            self.__dict__["_redirect"] = r != self.string and r or ""
        return self.__dict__["_redirect"] or None

    def __str__(self):
        return bytestring(self.string)

    def __unicode__(self):
        # The string representation includes the query attributes with HTTP GET.
        # This gives us the advantage of not having to parse the URL
        # when no separate query attributes were given (e.g. all info is in URL._string):
        if self._parts is None and self.method == GET:
            return self._string
        P = self._parts
        u = []
        if P[PROTOCOL]:
            u.append("%s://" % P[PROTOCOL])
        if P[USERNAME]:
            u.append("%s:%s@" % (P[USERNAME], P[PASSWORD]))
        if P[DOMAIN]:
            u.append(P[DOMAIN])
        if P[PORT]:
            u.append(":%s" % P[PORT])
        if P[PATH]:
            u.append("/%s/" % "/".join(P[PATH]))
        if P[PAGE] and len(u) > 0:
            u[-1] = u[-1].rstrip("/")
        if P[PAGE]:
            u.append("/%s" % P[PAGE])
        if P[QUERY] and self.method == GET:
            u.append("?%s" % self.querystring)
        if P[ANCHOR]:
            u.append("#%s" % P[ANCHOR])
        u = u"".join(u)
        u = u.lstrip("/")
        return u

    def __repr__(self):
        return "URL('%s', method='%s')" % (str(self), str(self.method))

    def copy(self):
        return URL(self.string, self.method, self.query)
def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
    """ Downloads the content at the given URL (by default it will be cached locally).
        Unless unicode=False, the content is returned as a unicode string.
    """
    link = URL(url, method, query)
    return link.download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
#url = URL("http://user:pass@example.com:992/animal/bird?species#wings")
#print url.parts
#print url.query
#print url.string
#--- STREAMING URL BUFFER --------------------------------------------------------------------------
def bind(object, method, function):
    """ Attaches the function as a method with the given name to the given object.
    """
    # new.instancemethod is Python 2 only; it binds the function to this instance.
    bound = new.instancemethod(function, object)
    setattr(object, method, bound)
class Stream(list):

    def __init__(self, url, delimiter="\n", **kwargs):
        """ Buffered stream of data from a given URL.
            Parsed packets are collected on the Stream itself (it is a list).
        """
        self.socket = URL(url).open(**kwargs)
        self.buffer = ""
        self.delimiter = delimiter

    def update(self, bytes=1024):
        """ Reads a number of bytes from the stream.
            If a delimiter is encountered, calls Stream.parse() on the packet.
        """
        packets = []
        self.buffer += self.socket.read(bytes)
        # NOTE: self.buffer alternates between a string and the 1-2 item list
        # produced by split(delimiter, 1): [packet, remainder] or [remainder].
        self.buffer = self.buffer.split(self.delimiter, 1)
        while len(self.buffer) > 1:
            data = self.buffer[0]
            data = self.parse(data)
            packets.append(data)
            # Keep splitting the remainder until no delimiter is left.
            self.buffer = self.buffer[-1]
            self.buffer = self.buffer.split(self.delimiter, 1)
        # Restore the buffer to a plain string (the unparsed remainder).
        self.buffer = self.buffer[-1]
        self.extend(packets)
        return packets

    def parse(self, data):
        """ Must be overridden in a subclass.
        """
        return data

    def clear(self):
        # Empties the list of collected packets in place.
        list.__init__(self, [])
def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
    """ Returns a new Stream whose parse() method wraps the given parse function.
    """
    buffered = Stream(url, delimiter, **kwargs)
    bind(buffered, "parse", lambda stream, data: parse(data))
    return buffered
#--- FIND URLs -------------------------------------------------------------------------------------
# Punctuation that may precede (index 0) or follow (index 1) a URL in running text.
RE_URL_PUNCTUATION = ("\"'{(>", "\"'.,;)}")
# NOTE: joining single characters with "|" inside a character class means the
# literal "|" is matched as well; harmless here but intentional-looking only.
RE_URL_HEAD = r"[%s|\[|\s]" % "|".join(RE_URL_PUNCTUATION[0]) # Preceded by space, parenthesis or HTML tag.
RE_URL_TAIL = r"[%s|\]]*[\s|\<]" % "|".join(RE_URL_PUNCTUATION[1]) # Followed by space, punctuation or HTML tag.
RE_URL1 = r"(https?://.*?)" + RE_URL_TAIL # Starts with http:// or https://
RE_URL2 = RE_URL_HEAD + r"(www\..*?\..*?)" + RE_URL_TAIL # Starts with www.
RE_URL3 = RE_URL_HEAD + r"([\w|-]*?\.(com|net|org))" + RE_URL_TAIL # Ends with .com, .net, .org
RE_URL1, RE_URL2, RE_URL3 = (
    re.compile(RE_URL1, re.I),
    re.compile(RE_URL2, re.I),
    re.compile(RE_URL3, re.I))
def find_urls(string, unique=True):
    """ Returns a list of URLs parsed from the string.
        Works on http://, https://, www. links or domain names ending in .com, .org, .net.
        Links can be preceded by leading punctuation (open parens)
        and followed by trailing punctuation (period, comma, close parens).
    """
    string = u(string)
    string = string.replace(u"\u2024", ".")
    string = string.replace(" ", " ")
    matches = []
    # Pad with spaces so the head/tail patterns can match at the string edges.
    padded = " %s " % string
    for pattern in (RE_URL1, RE_URL2, RE_URL3):
        for match in pattern.finditer(padded):
            link = match.group(1)
            # google.com">Google => google.com
            link = link.split("\">")[0].split("'>")[0]
            if not unique or link not in matches:
                matches.append(link)
    return matches

links = find_urls
RE_EMAIL = re.compile(r"[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+") # tom.de+smedt@clips.ua.ac.be

def find_email(string, unique=True):
    """ Returns a list of e-mail addresses parsed from the string.
    """
    string = u(string).replace(u"\u2024", ".")
    matches = []
    for match in RE_EMAIL.finditer(string):
        address = match.group(0)
        if not unique or address not in matches:
            matches.append(address)
    return matches
def find_between(a, b, string):
    """ Returns a list of substrings between a and b in the given string.
        Note: a and b are used as regular expression patterns.
    """
    pattern = re.compile("%s(.*?)%s" % (a, b), re.DOTALL | re.I)
    return pattern.findall(string)
#### PLAIN TEXT ####################################################################################
BLOCK = [
"title", "h1", "h2", "h3", "h4", "h5", "h6", "p",
"center", "blockquote", "div", "table", "ul", "ol", "pre", "code", "form"
]
SELF_CLOSING = ["br", "hr", "img"]
# Element tag replacements for a stripped version of HTML source with strip_tags().
# Block-level elements are followed by linebreaks,
# list items are preceded by an asterisk ("*").
LIST_ITEM = "*"
blocks = dict.fromkeys(BLOCK+["br", "tr", "td"], ("", "\n\n"))
blocks.update({
"li": ("%s " % LIST_ITEM, "\n"),
"img": ("", ""),
"br": ("", "\n"),
"th": ("", "\n"),
"tr": ("", "\n"),
"td": ("", "\t"),
})
class HTMLParser(sgmllib.SGMLParser):
    # Thin wrapper around the (Python 2 only) sgmllib parser:
    # routes unknown tags to the overridable handle_starttag()/handle_endtag().

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # Overridden in subclasses.
        pass

    def handle_endtag(self, tag):
        # Overridden in subclasses.
        pass

    def unknown_starttag(self, tag, attrs):
        self.handle_starttag(tag, attrs)

    def unknown_endtag(self, tag):
        self.handle_endtag(tag)

    def clean(self, html):
        """ Normalizes the HTML source before feeding it to the parser. """
        html = decode_utf8(html)
        html = html.replace("/>", " />")
        # NOTE(review): the replacements below read as no-ops in this copy;
        # upstream pattern.web escaped these sequences as HTML entities. The
        # source looks mangled by entity decoding - confirm against upstream.
        html = html.replace(" />", " />")
        html = html.replace("<!", "<!")
        html = html.replace("<!DOCTYPE", "<!DOCTYPE")
        html = html.replace("<!doctype", "<!doctype")
        html = html.replace("<!--", "<!--")
        return html

    def parse_declaration(self, i):
        # We can live without sgmllib's parse_declaration().
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            return i + 1

    def convert_charref(self, name):
        # This fixes a bug in older versions of sgmllib when working with Unicode.
        # Fix: ASCII ends at 127, not 255
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127:
            return
        return chr(n)
class HTMLTagstripper(HTMLParser):
    # Collects character data while dropping (or selectively keeping) element tags.

    def __init__(self):
        HTMLParser.__init__(self)

    def strip(self, html, exclude=[], replace=blocks):
        """ Returns the HTML string with all element tags (e.g. <p>) removed.
            - exclude : a list of tags to keep. Element attributes are stripped.
                        To preserve attributes a dict of (tag name, [attribute])-items can be given.
            - replace : a dictionary of (tag name, (replace_before, replace_after))-items.
                        By default, block-level elements are separated with linebreaks.
        """
        if html is None:
            return None
        self._exclude = isinstance(exclude, dict) and exclude or dict.fromkeys(exclude, [])
        self._replace = replace
        self._data = []
        self.feed(self.clean(html))
        self.close()
        self.reset()
        return "".join(self._data)

    def clean(self, html):
        # NOTE(review): upstream escaped "&" to "&amp;" here so entities survive
        # as literal text; the replacement below is a no-op in this copy and
        # looks entity-mangled - confirm against upstream.
        # Escape all entities (just strip tags).
        return HTMLParser.clean(self, html).replace("&", "&")

    def handle_starttag(self, tag, attributes):
        if tag in self._exclude:
            # Create the tag attribute string,
            # including attributes defined in the HTMLTagStripper._exclude dict.
            a = len(self._exclude[tag]) > 0 and attributes or []
            a = ["%s=\"%s\"" % (k,v) for k, v in a if k in self._exclude[tag]]
            a = (" "+" ".join(a)).rstrip()
            self._data.append("<%s%s>" % (tag, a))
        if tag in self._replace:
            self._data.append(self._replace[tag][0])
        if tag in self._replace and tag in SELF_CLOSING:
            # Self-closing tags get their "after" replacement immediately,
            # since no end tag will follow.
            self._data.append(self._replace[tag][1])

    def handle_endtag(self, tag):
        if tag in self._exclude and self._data and self._data[-1].startswith("<"+tag):
            # Never keep empty elements (e.g. <a></a>).
            self._data.pop(-1); return
        if tag in self._exclude:
            self._data.append("</%s>" % tag)
        if tag in self._replace:
            self._data.append(self._replace[tag][1])

    def handle_data(self, data):
        self._data.append(data.strip("\n\t"))

    def handle_comment(self, comment):
        if "comment" in self._exclude or \
              "!--" in self._exclude:
            self._data.append("<!--%s-->" % comment)
# As a function, bound to a shared module-level HTMLTagstripper instance
# (the instance mutates its own state in strip(), so calls are not thread-safe):
strip_tags = HTMLTagstripper().strip
def strip_element(string, tag, attributes=""):
    """ Removes all elements with the given tagname and attributes from the string.
        Open and close tags are kept in balance.
        No HTML parser is used: strip_element(s, "a", "href='foo' class='bar'")
        matches "<a href='foo' class='bar'" but not "<a class='bar' href='foo'".
    """
    lowered = string.lower() # Case-insensitive search.
    name = tag.strip("</>")
    attr = (" " + attributes.lower().strip()).rstrip()
    open_tag = "<%s%s" % (name, attr)  # Opening tag with the requested attributes.
    any_open = "<%s" % name            # Any opening tag, for nesting balance.
    close_tag = "</%s>" % name
    i = 0
    j = 0
    while j >= 0:
        i = lowered.find(open_tag, i)
        j = lowered.find(close_tag, i + 1)
        # Balance nested open tags against close tags.
        opened = lowered[i:j].count(any_open)
        closed = 1
        while opened > closed and j >= 0:
            k = lowered.find(close_tag, j + 1)
            opened += lowered[j:k].count(any_open)
            closed += 1
            j = k
        if i < 0:
            return string
        if j < 0:
            return string[:i]
        string = string[:i] + string[j + len(name) + 3:]
        lowered = string.lower()
    return string
def strip_between(a, b, string):
    """ Removes anything between (and including) string a and b inside the given string.
        Note: a and b are used as regular expression patterns.
    """
    pattern = re.compile("%s.*?%s" % (a, b), re.DOTALL | re.I)
    return pattern.sub("", string)
def strip_javascript(html):
    """ Removes all <script>...</script> elements, including their content. """
    return strip_between("<script.*?>", "</script>", html)
def strip_inline_css(html):
    """ Removes all <style>...</style> elements, including their content. """
    return strip_between("<style.*?>", "</style>", html)
def strip_comments(html):
    """ Removes all <!-- ... --> comments. """
    return strip_between("<!--", "-->", html)
def strip_forms(html):
    """ Removes all <form>...</form> elements, including their content. """
    return strip_between("<form.*?>", "</form>", html)
RE_AMPERSAND = re.compile("\&(?!\#)") # An "&" not followed by "#" (keeps numeric entities intact).
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # e.g. &#201; or &eacute;
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
    """
    if isinstance(string, (str, unicode)):
        # BUG FIX: in this copy the replacement strings had been mangled into
        # identity no-ops (e.g. replacing "<" with "<"), so nothing was escaped.
        # Restored the standard HTML entity escaping.
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
    """
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == '' :
                return unichr(int(name)) # decimal, e.g. "&#38;" => "&"
            if hex in ("x","X"):
                return unichr(int('0x'+name, 16)) # hexadecimal, e.g. "&#x26;" => "&"
        else:
            cp = htmlentitydefs.name2codepoint.get(name) # named, e.g. "&amp;" => "&"
            return cp and unichr(cp) or match.group() # unknown, e.g. "&foo;" => "&foo;"
    if isinstance(string, (str, unicode)):
        return RE_UNICODE.subn(replace_entity, string)[0]
    return string
def encode_url(string):
    # URL-encodes, e.g. "black/white" => "black%2Fwhite".
    return urllib.quote_plus(bytestring(string))
def decode_url(string):
    # Inverse of encode_url, e.g. "black%2Fwhite" => "black/white".
    # NOTE(review): the original trailing comment showed the encoding direction.
    return urllib.unquote_plus(string)
RE_SPACES = re.compile("( |\xa0)+", re.M) # Matches one or more spaces.
RE_TABS = re.compile(r"\t+", re.M) # Matches one or more tabs.
def collapse_spaces(string, indentation=False, replace=" "):
    """ Returns a string with consecutive spaces collapsed to a single space.
        Whitespace on empty lines and at the end of each line is removed.
        With indentation=True, retains leading whitespace on each line.
    """
    collapsed = []
    for line in string.splitlines():
        keep = len(line) - len(line.lstrip()) if indentation else 0
        collapsed.append(line[:keep] + RE_SPACES.sub(replace, line[keep:]).strip())
    return "\n".join(collapsed)
def collapse_tabs(string, indentation=False, replace=" "):
    """ Returns a string with (consecutive) tabs replaced by a single space.
        Whitespace on empty lines and at the end of each line is removed.
        With indentation=True, retains leading whitespace on each line.
    """
    lines = []
    for line in string.splitlines():
        # Width of the leading whitespace to preserve (0 unless indentation=True).
        indent = len(line) - len(line.lstrip()) if indentation else 0
        body = RE_TABS.sub(replace, line[indent:]).strip()
        lines.append(line[:indent] + body)
    return "\n".join(lines)
def collapse_linebreaks(string, threshold=1):
    """ Returns a string with consecutive linebreaks collapsed to at most the given threshold.
        Whitespace on empty lines and at the end of each line is removed.
    """
    limit = "\n" * threshold
    # Strip trailing whitespace per line first, so blank-ish lines become true linebreaks.
    stripped = "\n".join(line.rstrip() for line in string.splitlines())
    # Runs of `threshold` or more linebreaks are reduced to exactly `threshold`.
    return re.sub(limit + r"+", limit, stripped)
def plaintext(html, keep=[], replace=blocks, linebreaks=2, indentation=False):
    """ Returns a string with all HTML tags removed.
        Content inside HTML comments, the <style> tag and the <script> tags is removed.
        - keep        : a list of tags to keep. Element attributes are stripped.
                        To preserve attributes a dict of (tag name, [attribute])-items can be given.
        - replace     : a dictionary of (tag name, (replace_before, replace_after))-items.
                        By default, block-level elements are followed by linebreaks.
        - linebreaks  : the maximum amount of consecutive linebreaks,
        - indentation : keep left line indentation (tabs and spaces)?
    """
    # Idiom fix: use the "in" operator instead of calling keep.__contains__() directly
    # (works identically for both list and dict values of `keep`).
    # Strip unwanted elements first so their content never reaches the output.
    if "script" not in keep:
        html = strip_javascript(html)
    if "style" not in keep:
        html = strip_inline_css(html)
    if "form" not in keep:
        html = strip_forms(html)
    if "comment" not in keep and "!--" not in keep:
        html = strip_comments(html)
    html = html.replace("\r", "\n")
    html = strip_tags(html, exclude=keep, replace=replace)
    html = decode_entities(html)
    # Normalize the remaining whitespace.
    html = collapse_spaces(html, indentation)
    html = collapse_tabs(html, indentation)
    html = collapse_linebreaks(html, linebreaks)
    html = html.strip()
    return html
#### SEARCH ENGINE #################################################################################

# Query types accepted by SearchEngine.search():
SEARCH = "search" # Query for pages (i.e. links to websites).
IMAGE = "image" # Query for images.
NEWS = "news" # Query for news items.
# Image size hints:
TINY = "tiny" # Image size around 100x100.
SMALL = "small" # Image size around 200x200.
MEDIUM = "medium" # Image size around 500x500.
LARGE = "large" # Image size around 1000x1000.
# Sort orders:
RELEVANCY = "relevancy" # Sort results by most relevant.
LATEST = "latest" # Sort results by most recent.
class Result(dict):

    def __init__(self, url):
        """ An item in a list of results returned by SearchEngine.search().
            All dictionary entries are available as unicode string attributes.
            - url     : the URL of the referred web content,
            - title   : the title of the content at the URL,
            - text    : the content text,
            - language: the content language,
            - author  : for news items and images, the author,
            - date    : for news items, the publication date.
        """
        dict.__init__(self)
        self.url = url

    @property
    def description(self):
        # Backwards compatibility: older code read Result.description instead of Result.text.
        return self.text

    def download(self, *args, **kwargs):
        """ Download the content at the given URL.
            By default it will be cached - see URL.download().
        """
        return URL(self.url).download(*args, **kwargs)

    def __getattr__(self, k):
        # Missing attributes / keys yield an empty unicode string instead of raising.
        return self.get(k, u"")

    def __getitem__(self, k):
        return self.get(k, u"")

    def __setattr__(self, k, v):
        # Attribute assignment stores into the dict; keys and values become unicode,
        # and None (or a value whose unicode form is falsy) becomes u"".
        dict.__setitem__(self, u(k), u(v) if v is not None and u(v) else u"")

    def __setitem__(self, k, v):
        dict.__setitem__(self, u(k), u(v) if v is not None and u(v) else u"")

    def setdefault(self, k, v):
        dict.setdefault(self, u(k), u(v))

    def update(self, *args, **kwargs):
        # Normalize all incoming pairs through the constructor, then store as unicode.
        m = dict(*args, **kwargs)
        dict.update(self, [(u(k), u(v)) for k, v in m.items()])

    def __repr__(self):
        return "Result(url=%s)" % repr(self.url)
class Results(list):

    def __init__(self, source=None, query=None, type=SEARCH, total=0):
        """ A list of results returned from SearchEngine.search().
            - source: the service that yields the results (e.g. GOOGLE, TWITTER).
            - query : the query that yields the results.
            - type  : the query type (SEARCH, IMAGE, NEWS).
            - total : the total result count.
              This is not the length of the list, but the total number of
              matches for the given query.
        """
        # Metadata travels along with the list itself.
        self.source, self.query, self.type, self.total = source, query, type, total
class SearchEngine:

    def __init__(self, license=None, throttle=1.0, language=None):
        """ A base class for a web service.
            - license : license key for the API,
            - throttle: delay between requests (avoid hammering the server).
            Inherited by: Google, Yahoo, Bing, Twitter, Wikipedia, Flickr.
        """
        self.license = license
        self.throttle = throttle  # Amount of sleep time after executing a query.
        self.language = language  # Result.language restriction (e.g., "en").
        # Formatter applied to each attribute of each Result; identity by default.
        self.format = lambda x: x

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        # Base implementation yields an empty result list; subclasses override this.
        return Results(source=None, query=query, type=type)
# Exception hierarchy for search engines, rooted at SearchEngineError.
# Subclassing HTTPError lets callers catch network and service errors together.
class SearchEngineError(HTTPError):
    pass
class SearchEngineTypeError(SearchEngineError):
    pass # Raised when an unknown type is passed to SearchEngine.search().
class SearchEngineLimitError(SearchEngineError):
    pass # Raised when the query limit for a license is reached.
#--- GOOGLE ----------------------------------------------------------------------------------------
# Google Custom Search is a paid service.
# https://code.google.com/apis/console/
# http://code.google.com/apis/customsearch/v1/overview.html

GOOGLE = "https://www.googleapis.com/customsearch/v1?" # API endpoint.
GOOGLE_LICENSE = api.license["Google"]                 # Default license key.
GOOGLE_CUSTOM_SEARCH_ENGINE = "000579440470800426354:_4qo2s0ijsi" # "cx" search engine id.

# Search results can start with: "Jul 29, 2007 ...",
# which is the date of the page parsed by Google from the content.
# NOTE(review): the trailing "..." dots are unescaped, so they match ANY three
# characters, not only a literal ellipsis - presumably intended; confirm upstream.
RE_GOOGLE_DATE = re.compile("^([A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}) {0,1}...")
class Google(SearchEngine):
    """ Search engine for Google Custom Search (paid service, Python 2 code).
    """

    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or GOOGLE_LICENSE, throttle, language)

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Google for the given query.
            - type : SEARCH,
            - start: maximum 100 results => start 1-10 with count=10,
            - count: maximum 10,
            There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        # The API serves at most 100 results, so reject out-of-range pages up front.
        if not query or count < 1 or start < 1 or start > (100 / count):
            return Results(GOOGLE, query, type)
        # 1) Create request URL.
        url = URL(GOOGLE, query={
            "key": self.license or GOOGLE_LICENSE,
            "cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
            "q": query,
            "start": 1 + (start-1) * count,
            "num": min(count, 10),
            "alt": "json"
        })
        # 2) Restrict language.
        if self.language is not None:
            url.query["lr"] = "lang_" + self.language
        # 3) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        # HTTP 403 in the JSON body signals that the daily quota is exhausted.
        if data.get("error", {}).get("code") == 403:
            raise SearchEngineLimitError
        results = Results(GOOGLE, query, type)
        results.total = int(data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
        for x in data.get("items", []):
            r = Result(url=None)
            r.url = self.format(x.get("link"))
            r.title = self.format(x.get("title"))
            # NOTE(review): x.get("htmlSnippet") may be None (AttributeError on .replace),
            # and the "<br> " literal looks like it was mangled from "<br>\n" - confirm.
            r.text = self.format(x.get("htmlSnippet").replace("<br> ","").replace("<b>...</b>", "..."))
            r.language = self.language or ""
            r.date = ""
            if not r.date:
                # Google Search results can start with a date (parsed from the content):
                m = RE_GOOGLE_DATE.match(r.text)
                if m:
                    r.date = m.group(1)
                    r.text = "..." + r.text[len(m.group(0)):]
            results.append(r)
        return results

    def translate(self, string, input="en", output="fr", **kwargs):
        """ Returns the translation of the given string in the desired output language.
            Google Translate is a paid service, license without billing raises HTTP401Authentication.
        """
        # NOTE(review): uses the module-level GOOGLE_LICENSE, not self.license - confirm intended.
        url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
            "key": GOOGLE_LICENSE,
            "q": string,
            "source": input,
            "target": output
        })
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(**kwargs)
        except HTTP403Forbidden:
            raise HTTP401Authentication, "Google translate API is a paid service"
        data = json.loads(data)
        data = data.get("data", {}).get("translations", [{}])[0].get("translatedText", "")
        data = decode_entities(data)
        return u(data)

    def identify(self, string, **kwargs):
        """ Returns a (language, confidence)-tuple for the given string.
            Google Translate is a paid service, license without billing raises HTTP401Authentication.
        """
        url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
            "key": GOOGLE_LICENSE,
            "q": string[:1000] # The API only needs a sample; truncate long input.
        })
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(**kwargs)
        except HTTP403Forbidden:
            raise HTTP401Authentication, "Google translate API is a paid service"
        data = json.loads(data)
        data = data.get("data", {}).get("detections", [[{}]])[0][0]
        data = u(data.get("language")), float(data.get("confidence"))
        return data
#--- YAHOO -----------------------------------------------------------------------------------------
# Yahoo BOSS is a paid service.
# http://developer.yahoo.com/search/

YAHOO = "http://yboss.yahooapis.com/ysearch/" # API endpoint (path suffix selects web/images/news).
YAHOO_LICENSE = api.license["Yahoo"]          # Default (consumer key, consumer secret) pair.
class Yahoo(SearchEngine):
    """ Search engine for Yahoo BOSS (paid service, OAuth 1.0 signed requests).
    """

    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or YAHOO_LICENSE, throttle, language)

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Yahoo for the given query.
            - type : SEARCH, IMAGE or NEWS,
            - start: maximum 1000 results => start 1-100 with count=10, 1000/count,
            - count: maximum 50, or 35 for images.
            There is no daily limit, however Yahoo BOSS is a paid service.
        """
        if type not in (SEARCH, IMAGE, NEWS):
            raise SearchEngineTypeError
        # The query type selects the endpoint path.
        if type == SEARCH:
            url = YAHOO + "web"
        if type == IMAGE:
            url = YAHOO + "images"
        if type == NEWS:
            url = YAHOO + "news"
        if not query or count < 1 or start < 1 or start > 1000 / count:
            return Results(YAHOO, query, type)
        # 1) Create request URL.
        url = URL(url, method=GET, query={
            "q": encode_url(query),
            "start": 1 + (start-1) * count,
            "count": min(count, type==IMAGE and 35 or 50),
            "format": "json"
        })
        # 2) Restrict language.
        if self.language is not None:
            market = locale.market(self.language)
            if market:
                url.query["market"] = market.lower()
        # 3) BOSS OAuth authentication.
        # self.license is a (consumer key, consumer secret) pair.
        url.query.update({
            "oauth_version": "1.0",
            "oauth_nonce": oauth.nonce(),
            "oauth_timestamp": oauth.timestamp(),
            "oauth_consumer_key": self.license[0],
            "oauth_signature_method": "HMAC-SHA1"
        })
        # The signature must be computed over the full query AFTER all parameters are set.
        url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, method=GET, secret=self.license[1])
        # 3) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(cached=cached, **kwargs)
        except HTTP401Authentication:
            raise HTTP401Authentication, "Yahoo %s API is a paid service" % type
        except HTTP403Forbidden:
            raise SearchEngineLimitError
        data = json.loads(data)
        data = data.get("bossresponse") or {}
        data = data.get({SEARCH:"web", IMAGE:"images", NEWS:"news"}[type], {})
        results = Results(YAHOO, query, type)
        results.total = int(data.get("totalresults") or 0)
        for x in data.get("results", []):
            r = Result(url=None)
            r.url = self.format(x.get("url", x.get("clickurl")))
            r.title = self.format(x.get("title"))
            r.text = self.format(x.get("abstract"))
            r.date = self.format(x.get("date"))
            r.author = self.format(x.get("source"))
            # "language" may look like "english spanish"; keep only the first word.
            r.language = self.format(x.get("language") and \
                                     x.get("language").split(" ")[0] or self.language or "")
            results.append(r)
        return results
#--- BING ------------------------------------------------------------------------------------------
# https://datamarket.azure.com/dataset/5BA839F1-12CE-4CCE-BF57-A49D98D29A44
# https://datamarket.azure.com/account/info

BING = "https://api.datamarket.azure.com/Bing/Search/" # API endpoint.
BING_LICENSE = api.license["Bing"]                     # Default license key.
class Bing(SearchEngine):
    """ Search engine for the Bing Search API on Azure Datamarket.
    """

    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or BING_LICENSE, throttle, language)

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Bing for the given query.
            - type : SEARCH, IMAGE or NEWS,
            - start: maximum 1000 results => start 1-100 with count=10, 1000/count,
            - count: maximum 50, or 15 for news,
            - size : for images, either SMALL, MEDIUM or LARGE.
            There is no daily query limit.
        """
        if type not in (SEARCH, IMAGE, NEWS):
            raise SearchEngineTypeError
        # The query type selects the Bing "source".
        if type == SEARCH:
            src = "Web"
        if type == IMAGE:
            src = "Image"
        if type == NEWS:
            src = "News"
        if not query or count < 1 or start < 1 or start > 1000 / count:
            return Results(BING + src + "?", query, type)
        # 1) Construct request URL.
        url = URL(BING + "Composite", method=GET, query={
            "Sources": "'" + src.lower() + "'",
            "Query": "'" + query + "'",
            "$skip": 1 + (start-1) * count,
            "$top": min(count, type==NEWS and 15 or 50),
            "$format": "json",
        })
        # 2) Restrict image size.
        if size in (TINY, SMALL, MEDIUM, LARGE):
            url.query["ImageFilters"] = {
                TINY: "'Size:Small'", # TINY is treated the same as SMALL by Bing.
                SMALL: "'Size:Small'",
                MEDIUM: "'Size:Medium'",
                LARGE: "'Size:Large'" }[size]
        # 3) Restrict language.
        # The language filter is appended inside the quoted Query parameter.
        if type in (SEARCH, IMAGE) and self.language is not None:
            url.query["Query"] = url.query["Query"][:-1] + " language: %s'" % self.language
        #if self.language is not None:
        # market = locale.market(self.language)
        # if market:
        # url.query["market"] = market
        # 4) Parse JSON response.
        kwargs["authentication"] = ("", self.license) # Basic auth: empty user, key as password.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(cached=cached, **kwargs)
        except HTTP401Authentication:
            raise HTTP401Authentication, "Bing %s API is a paid service" % type
        data = json.loads(data)
        data = data.get("d", {})
        data = data.get("results", [{}])[0]
        results = Results(BING, query, type)
        results.total = int(data.get(src+"Total", 0))
        for x in data.get(src, []):
            r = Result(url=None)
            r.url = self.format(x.get("MediaUrl", x.get("Url")))
            r.title = self.format(x.get("Title"))
            r.text = self.format(x.get("Description", x.get("Snippet")))
            r.language = self.language or ""
            r.date = self.format(x.get("DateTime", x.get("Date")))
            r.author = self.format(x.get("Source"))
            results.append(r)
        return results
#--- TWITTER ---------------------------------------------------------------------------------------
# http://apiwiki.twitter.com/

TWITTER = "http://search.twitter.com/"                              # Search API endpoint.
TWITTER_STREAM = "https://stream.twitter.com/1/statuses/filter.json" # Streaming API endpoint.
TWITTER_STATUS = "https://twitter.com/%s/status/%s"                  # (user, id) => tweet URL.
TWITTER_LICENSE = api.license["Twitter"]
TWITTER_HASHTAG = re.compile(r"(\s|^)(#[a-z0-9_\-]+)", re.I) # Word starts with "#".
TWITTER_RETWEET = re.compile(r"(\s|^RT )(@[a-z0-9_\-]+)", re.I) # Word starts with "RT @".
class Twitter(SearchEngine):
    """ Search engine for the (v1-era) Twitter search, trends and streaming APIs.
    """

    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or TWITTER_LICENSE, throttle, language)

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
        """ Returns a list of results from Twitter for the given query.
            - type : SEARCH or TRENDS,
            - start: maximum 1500 results (10 for trends) => start 1-15 with count=100, 1500/count,
            - count: maximum 100, or 10 for trends.
            There is an hourly limit of 150+ queries (actual amount undisclosed).
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if not query or count < 1 or start < 1 or start > 1500 / count:
            return Results(TWITTER, query, type)
        # 1) Construct request URL.
        url = URL(TWITTER + "search.json?", method=GET)
        url.query = {
            "q": query,
            "page": start,
            "rpp": min(count, 100) # "Results per page".
        }
        if "geo" in kwargs:
            # Filter by location with geo=(latitude, longitude, radius).
            # It can also be a (latitude, longitude)-tuple with default radius "10km".
            url.query["geocode"] = ",".join((map(str, kwargs.pop("geo")) + ["10km"])[:3])
        # 2) Restrict language.
        url.query["lang"] = self.language or ""
        # 3) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = URL(url).download(cached=cached, **kwargs)
        except HTTP420Error:
            # HTTP 420 = rate limited by Twitter.
            raise SearchEngineLimitError
        data = json.loads(data)
        results = Results(TWITTER, query, type)
        results.total = None # The search API does not report a total match count.
        for x in data.get("results", data.get("trends", [])):
            r = Result(url=None)
            r.url = self.format(TWITTER_STATUS % (x.get("from_user"), x.get("id_str")))
            r.text = self.format(x.get("text"))
            r.date = self.format(x.get("created_at", data.get("as_of")))
            r.author = self.format(x.get("from_user"))
            r.profile = self.format(x.get("profile_image_url")) # Profile picture URL.
            r.language = self.format(x.get("iso_language_code"))
            results.append(r)
        return results

    def trends(self, **kwargs):
        """ Returns a list with 10 trending topics on Twitter.
        """
        url = URL("https://api.twitter.com/1/trends/1.json") # "1" = worldwide WOEID.
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(**kwargs)
        data = json.loads(data)
        return [u(x.get("name")) for x in data[0].get("trends", [])]

    def stream(self, query):
        """ Returns a live stream of Result objects for the given query.
        """
        url = URL(TWITTER_STREAM)
        # OAuth 1.0: self.license = (consumer key, consumer secret, (token, token secret)).
        url.query.update({
            "track": query,
            "oauth_version": "1.0",
            "oauth_nonce": oauth.nonce(),
            "oauth_timestamp": oauth.timestamp(),
            "oauth_consumer_key": self.license[0],
            "oauth_token": self.license[2][0],
            "oauth_signature_method": "HMAC-SHA1"
        })
        url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, GET,
            self.license[1],
            self.license[2][1])
        return TwitterStream(url, delimiter="\n", format=self.format)
class TwitterStream(Stream):
    """ A live stream of tweets; parses each received JSON chunk into a Result.
    """

    def __init__(self, socket, delimiter="\n", format=lambda s: s):
        Stream.__init__(self, socket, delimiter)
        self.format = format # Formatter applied to each Result attribute.

    def parse(self, data):
        """ TwitterStream.queue will populate with Result objects as
            TwitterStream.update() is called iteratively.
        """
        x = json.loads(data)
        r = Result(url=None)
        r.url = self.format(TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
        r.text = self.format(x.get("text"))
        r.date = self.format(x.get("created_at"))
        r.author = self.format(x.get("user", {}).get("screen_name"))
        r.profile = self.format(x.get("profile_image_url"))
        r.language = self.format(x.get("iso_language_code"))
        return r
def author(name):
    """ Returns a Twitter query-by-author-name that can be passed to Twitter.search().
        For example: Twitter().search(author("tom_de_smedt"))
    """
    return "from:{0}".format(name)
def hashtags(string):
    """ Returns a list of hashtags (words starting with a #hash) from a tweet.
    """
    # findall() yields (leading whitespace, tag) tuples; keep the tag.
    return [match[1] for match in TWITTER_HASHTAG.findall(string)]
def retweets(string):
    """ Returns a list of retweets (words starting with a RT @author) from a tweet.
    """
    # findall() yields (leading "RT ", mention) tuples; keep the mention.
    return [match[1] for match in TWITTER_RETWEET.findall(string)]
#stream = Twitter().stream("cat")
#for i in range(10):
# stream.update()
# for tweet in reversed(stream):
# print tweet.text
# print tweet.url
# print
#stream.clear()
#--- MEDIAWIKI -------------------------------------------------------------------------------------
# http://en.wikipedia.org/w/api.php

WIKIA = "http://wikia.com"
WIKIPEDIA = "http://wikipedia.com"
WIKIPEDIA_LICENSE = api.license["Wikipedia"]
MEDIAWIKI_LICENSE = None
# URL template; subclasses fill in {SUBDOMAIN}, {DOMAIN} and {API}.
MEDIAWIKI = "http://{SUBDOMAIN}.{DOMAIN}{API}"

# Pattern for meta links (e.g. Special:RecentChanges).
# http://en.wikipedia.org/wiki/Main_namespace
MEDIAWIKI_NAMESPACE = ["Main", "User", "Wikipedia", "File", "MediaWiki", "Template", "Help", "Category", "Portal", "Book"]
MEDIAWIKI_NAMESPACE += [s+" talk" for s in MEDIAWIKI_NAMESPACE] + ["Talk", "Special", "Media"]
MEDIAWIKI_NAMESPACE += ["WP", "WT", "MOS", "C", "CAT", "Cat", "P", "T", "H", "MP", "MoS", "Mos"]
# BUG FIX: the alternation must be grouped. Without the group, "^" bound only to
# the first namespace and ":" only to the last, so e.g. "Maintenance" matched "^Main"
# and "User" matched without a colon. A non-capturing group keeps group indices stable.
_mediawiki_namespace = re.compile(r"^(?:" + "|".join(MEDIAWIKI_NAMESPACE) + r"):", re.I)
# Pattern to identify disambiguation pages.
MEDIAWIKI_DISAMBIGUATION = "<a href=\"/wiki/Help:Disambiguation\" title=\"Help:Disambiguation\">disambiguation</a> page"
# Pattern to identify references, e.g. [12]
MEDIAWIKI_REFERENCE = r"\s*\[[0-9]{1,3}\]"
class MediaWiki(SearchEngine):
    """ Search engine for MediaWiki-backed wikis; subclassed by Wikipedia and Wikia.
        search() returns a single parsed MediaWikiArticle instead of a Results list.
    """

    def __init__(self, license=None, throttle=5.0, language="en"):
        SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)

    @property
    def _url(self):
        # Must be overridden in a subclass; see Wikia and Wikipedia.
        return None

    # The three properties below let subclasses substitute their own article /
    # section / table classes without overriding the parsing code.
    @property
    def MediaWikiArticle(self):
        return MediaWikiArticle

    @property
    def MediaWikiSection(self):
        return MediaWikiSection

    @property
    def MediaWikiTable(self):
        return MediaWikiTable

    def __iter__(self):
        return self.all()

    def all(self, **kwargs):
        """ Returns an iterator over all MediaWikiArticle objects.
            Optional parameters can include those passed to
            MediaWiki.list(), MediaWiki.search() and URL.download().
        """
        for title in self.list(**kwargs):
            yield self.search(title, **kwargs)

    articles = all # Alias.

    def list(self, namespace=0, start=None, count=100, cached=True, **kwargs):
        """ Returns an iterator over all article titles (for a given namespace id).
        """
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        # Fetch article titles (default) or a custom id.
        id = kwargs.pop("_id", "title")
        # Loop endlessly (= until the last request no longer yields an "apcontinue").
        # See: http://www.mediawiki.org/wiki/API:Allpages
        while start != -1:
            url = URL(self._url, method=GET, query={
                "action": "query",
                "list": "allpages",
                "apnamespace": namespace,
                "apfrom": start or "",
                "aplimit": min(count, 500),
                "apfilterredir": "nonredirects",
                "format": "json"
            })
            data = url.download(cached=cached, **kwargs)
            data = json.loads(data)
            for x in data.get("query", {}).get("allpages", {}):
                if x.get(id):
                    yield x[id]
            # "apcontinue"/"apfrom" holds the next page marker; -1 ends the loop.
            start = data.get("query-continue", {}).get("allpages", {})
            start = start.get("apcontinue", start.get("apfrom", -1))
        # NOTE(review): raising StopIteration inside a generator is a RuntimeError
        # under PEP 479 (Python 3.7+); a bare "return" would be the portable form.
        raise StopIteration

    def search(self, query, type=SEARCH, start=1, count=1, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a MediaWikiArticle for the given query.
            The query is case-sensitive, for example on Wikipedia:
            - "tiger" = Panthera tigris,
            - "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if count < 1:
            return None
        # 1) Construct request URL (e.g., Wikipedia for a given language).
        url = URL(self._url, method=GET, query={
            "action": "parse",
            "page": query.replace(" ","_"),
            "redirects": 1,
            "format": "json"
        })
        # 2) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("timeout", 30) # Parsing the article takes some time.
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        data = data.get("parse", {})
        a = self._parse_article(data, query=query)
        a = self._parse_article_sections(a, data)
        a = self._parse_article_section_structure(a)
        # Pages that do not exist are served with an id="noarticletext" placeholder.
        if not a.html or "id=\"noarticletext\"" in a.html:
            return None
        return a

    def _parse_article(self, data, **kwargs):
        # Builds a MediaWikiArticle from the JSON "parse" payload.
        return self.MediaWikiArticle(
            title = plaintext(data.get("displaytitle", data.get("title", ""))),
            source = data.get("text", {}).get("*", ""),
            disambiguation = data.get("text", {}).get("*", "").find(MEDIAWIKI_DISAMBIGUATION) >= 0,
            links = [x["*"] for x in data.get("links", []) if not _mediawiki_namespace.match(x["*"])],
            categories = [x["*"] for x in data.get("categories", [])],
            external = [x for x in data.get("externallinks", [])],
            media = [x for x in data.get("images", [])],
            languages = dict([(x["lang"], x["*"]) for x in data.get("langlinks", [])]),
            language = self.language,
            parser = self, **kwargs)

    def _parse_article_sections(self, article, data):
        # If "References" is a section in the article,
        # the HTML will contain a marker <h*><span class="mw-headline" id="References">.
        # http://en.wikipedia.org/wiki/Section_editing
        # t/d track the title and depth of the PREVIOUS heading; each matched heading
        # closes the previous section (source range i..j).
        t = article.title
        d = 0
        i = 0
        for x in data.get("sections", {}):
            a = x.get("anchor")
            if a:
                p = r"<h.>\s*.*?\s*<span class=\"mw-headline\" id=\"%s\">" % a
                p = re.compile(p)
                m = p.search(article.source, i)
                if m:
                    j = m.start()
                    article.sections.append(self.MediaWikiSection(article,
                        title = t,
                        start = i,
                        stop = j,
                        level = d))
                    t = x.get("line", "")
                    d = int(x.get("level", 2)) - 1
                    i = j
        return article

    def _parse_article_section_structure(self, article):
        # Sections with higher level are children of previous sections with lower level.
        for i, s2 in enumerate(article.sections):
            for s1 in reversed(article.sections[:i]):
                if s1.level < s2.level:
                    s2.parent = s1
                    s1.children.append(s2)
                    break
        return article
class MediaWikiArticle:

    def __init__(self, title=u"", source=u"", links=None, categories=None, languages=None, disambiguation=False, **kwargs):
        """ A MediaWiki article returned from MediaWiki.search().
            MediaWikiArticle.string contains the HTML content.
            BUG FIX: links/categories/languages used mutable default arguments
            ([] and {}) that were stored directly on the instance, so all articles
            constructed without them shared (and could corrupt) the same objects.
        """
        self.title = title                                          # Article title.
        self.source = source                                        # Article HTML content.
        self.sections = []                                          # Article sections.
        self.links = links if links is not None else []             # List of titles of linked articles.
        self.categories = categories if categories is not None else [] # List of categories. As links, prepend "Category:".
        self.external = []                                          # List of external links.
        self.media = []                                             # List of linked media (images, sounds, ...)
        self.disambiguation = disambiguation                        # True when the article is a disambiguation page.
        self.languages = languages if languages is not None else {} # Dictionary of (language, article)-items, e.g. Cat => ("nl", "Kat")
        self.language = kwargs.get("language", "en")
        self.parser = kwargs.get("parser", MediaWiki())
        # Any remaining keyword arguments become attributes (overwriting the above).
        for k, v in kwargs.items():
            setattr(self, k, v)

    def _plaintext(self, string, **kwargs):
        """ Strips HTML tags, whitespace and wiki markup from the HTML source, including:
            metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
            This is called internally from MediaWikiArticle.string.
        """
        s = string
        s = strip_between("<table class=\"metadata", "</table>", s) # Metadata.
        s = strip_between("<table id=\"toc", "</table>", s) # Table of contents.
        s = strip_between("<table class=\"infobox", "</table>", s) # Infobox.
        s = strip_between("<table class=\"wikitable", "</table>", s) # Table.
        s = strip_element(s, "table", "class=\"navbox") # Navbox.
        s = strip_between("<div id=\"annotation", "</div>", s) # Annotations.
        s = strip_between("<div class=\"dablink", "</div>", s) # Disambiguation message.
        s = strip_between("<div class=\"magnify", "</div>", s) # Thumbnails.
        s = strip_between("<div class=\"thumbcaption", "</div>", s) # Thumbnail captions.
        s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s) # LaTex math images.
        s = plaintext(s, **kwargs)
        s = re.sub(r"\[edit\]\s*", "", s) # [edit] is language dependent (e.g. nl => "[bewerken]")
        # NOTE(review): the second replace below is a no-op as written; the literal
        # looks mangled (perhaps it once collapsed a double space) - confirm upstream.
        s = s.replace("[", " [").replace(" [", " [") # Space before inline references.
        return s

    def plaintext(self, **kwargs):
        return self._plaintext(self.source, **kwargs)

    @property
    def html(self):
        return self.source

    @property
    def string(self):
        return self.plaintext()

    def __repr__(self):
        return "MediaWikiArticle(title=%s)" % repr(self.title)
class MediaWikiSection:

    def __init__(self, article, title=u"", start=0, stop=0, level=1):
        """ A (nested) section in the content of a MediaWikiArticle.
        """
        self.article = article # MediaWikiArticle the section is part of.
        self.parent = None # MediaWikiSection the section is part of.
        self.children = [] # MediaWikiSections belonging to this section.
        self.title = title # Section title.
        self._start = start # Section start index in MediaWikiArticle.string.
        self._stop = stop # Section stop index in MediaWikiArticle.string.
        self._level = level # Section depth (main title + intro = level 0).
        self._tables = None # Lazily parsed by the tables property.

    def plaintext(self, **kwargs):
        return self.article._plaintext(self.source, **kwargs)

    @property
    def source(self):
        # The section's slice of the article HTML.
        return self.article.source[self._start:self._stop]

    @property
    def html(self):
        return self.source

    @property
    def string(self):
        return self.plaintext()

    @property
    def content(self):
        # ArticleSection.string, minus the title.
        s = self.plaintext()
        if s == self.title or s.startswith(self.title+"\n"):
            return s[len(self.title):].lstrip()
        return s

    @property
    def tables(self):
        """ Yields a list of MediaWikiTable objects in the section.
        """
        if self._tables is None:
            self._tables = []
            b = "<table class=\"wikitable\"", "</table>"
            p = self.article._plaintext
            f = find_between
            for s in f(b[0], b[1], self.source):
                # Table title comes from an optional <caption>; re-wrap the raw source.
                t = self.article.parser.MediaWikiTable(self,
                    title = p((f(r"<caption.*?>", "</caption>", s) + [""])[0]),
                    source = b[0] + s + b[1]
                )
                for i, row in enumerate(f(r"<tr", "</tr>", s)):
                    # 1) Parse <td> and <th> content and format it as plain text.
                    # 2) Parse <td colspan=""> attribute, duplicate spanning cells.
                    # 3) For <th> in the first row, update MediaWikiTable.headers.
                    r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
                    r1 = (((f(r'colspan="', r'"', v)+[1])[0], v[v.find(">")+1:]) for v in r1)
                    r1 = ((int(n), v) for n, v in r1)
                    # Each cell is repeated n times (its colspan) in the flattened row r2.
                    r2 = []; [[r2.append(p(v)) for j in range(n)] for n, v in r1]
                    if i == 0 and "</th>" in row:
                        t.headers = r2
                    else:
                        t.rows.append(r2)
                self._tables.append(t)
        return self._tables

    @property
    def level(self):
        return self._level

    depth = level # Alias.

    def __repr__(self):
        return "MediaWikiSection(title='%s')" % bytestring(self.title)
class MediaWikiTable:

    def __init__(self, section, title=u"", headers=None, rows=None, source=u""):
        """ A <table class="wikitable"> in a MediaWikiSection.
            BUG FIX: headers/rows used mutable default arguments ([]) that were
            stored directly on the instance; since MediaWikiSection.tables appends
            to MediaWikiTable.rows, ALL tables silently shared one list.
        """
        self.section = section # MediaWikiSection the table is part of.
        self.source = source # Table HTML.
        self.title = title # Table title.
        self.headers = headers if headers is not None else [] # List of table headers.
        self.rows = rows if rows is not None else [] # List of table rows, each a list of cells.

    @property
    def html(self):
        return self.source

    def __repr__(self):
        return "MediaWikiTable(title='%s')" % bytestring(self.title)
#--- MEDIAWIKI: WIKIPEDIA --------------------------------------------------------------------------

class Wikipedia(MediaWiki):

    def __init__(self, license=None, throttle=5.0, language="en"):
        """ Mediawiki search engine for http://[language].wikipedia.org.
        """
        # Calls SearchEngine.__init__ directly (MediaWiki.__init__ would do the same).
        SearchEngine.__init__(self, license or WIKIPEDIA_LICENSE, throttle, language)
        self._subdomain = language # e.g. "en" => en.wikipedia.org.

    @property
    def _url(self):
        # Fill in the MEDIAWIKI template for this wiki.
        s = MEDIAWIKI
        s = s.replace("{SUBDOMAIN}", self._subdomain)
        s = s.replace("{DOMAIN}", "wikipedia.org")
        s = s.replace("{API}", '/w/api.php')
        return s

    # Substitute the Wikipedia-flavored article/section/table classes
    # used by the MediaWiki parsing code.
    @property
    def MediaWikiArticle(self):
        return WikipediaArticle

    @property
    def MediaWikiSection(self):
        return WikipediaSection

    @property
    def MediaWikiTable(self):
        return WikipediaTable
class WikipediaArticle(MediaWikiArticle):

    def download(self, media, **kwargs):
        """ Downloads an item from MediaWikiArticle.media and returns the content.
            Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
            so Wikipedia might not like it that you download media in this way.
            To save the media in a file:
            data = article.download(media)
            open(filename+extension(media),"w").write(data)
        """
        url = "http://%s.wikipedia.org/wiki/File:%s" % (self.__dict__.get("language", "en"), media)
        # Be polite: only sleep before requests that are not already cached.
        if url not in cache:
            time.sleep(1)
        # Scrape the file page for the actual upload.wikimedia.org location.
        data = URL(url).download(**kwargs)
        data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
        data = data and URL("http://" + data.group(0)).download(**kwargs) or None
        return data

    def __repr__(self):
        return "WikipediaArticle(title=%s)" % repr(self.title)
class WikipediaSection(MediaWikiSection):
    # Wikipedia-flavored section; only the repr label differs from the base class.
    def __repr__(self):
        return "WikipediaSection(title='{0}')".format(bytestring(self.title))
class WikipediaTable(MediaWikiTable):
    # Wikipedia-flavored table; only the repr label differs from the base class.
    def __repr__(self):
        return "WikipediaTable(title='{0}')".format(bytestring(self.title))
#article = Wikipedia().search("cat")
#for section in article.sections:
# print " "*(section.level-1) + section.title
#if article.media:
# data = article.download(article.media[2])
# f = open(article.media[2], "w")
# f.write(data)
# f.close()
#
#article = Wikipedia(language="nl").search("borrelnootje")
#print article.string
#--- MEDIAWIKI: WIKIA ------------------------------------------------------------------------------

class Wikia(MediaWiki):

    def __init__(self, domain="www", license=None, throttle=5.0, language="en"):
        """ Mediawiki search engine for http://[domain].wikia.com.
        """
        SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
        self._subdomain = domain

    @property
    def _url(self):
        # Fill in the MEDIAWIKI template for this wiki.
        s = MEDIAWIKI
        s = s.replace("{SUBDOMAIN}", self._subdomain)
        s = s.replace("{DOMAIN}", "wikia.com")
        s = s.replace("{API}", '/api.php')
        return s

    # Substitute the Wikia-flavored article/section/table classes.
    @property
    def MediaWikiArticle(self):
        return WikiaArticle

    @property
    def MediaWikiSection(self):
        return WikiaSection

    @property
    def MediaWikiTable(self):
        return WikiaTable

    def all(self, **kwargs):
        # Generator over all articles; batch=True (default) uses batched requests.
        if kwargs.pop("batch", True):
            # We can take advantage of Wikia's search API to reduce bandwith.
            # Instead of executing a query to retrieve each article,
            # we query for a batch of (10) articles.
            iterator = self.list(_id="pageid", **kwargs)
            while True:
                batch, done = [], False
                try:
                    # Python 2 iterator protocol (.next()); next(iterator) in Python 3.
                    for i in range(10): batch.append(iterator.next())
                except StopIteration:
                    done = True # No more articles, finish batch and raise StopIteration.
                url = URL(self._url.replace("api.php", "wikia.php"), method=GET, query={
                    "controller": "WikiaSearch",
                    "method": "getPages",
                    "ids": '|'.join(str(id) for id in batch),
                    "format": "json"
                })
                kwargs.setdefault("unicode", True)
                kwargs.setdefault("cached", True)
                # Scale the timeout with the batch size.
                kwargs["timeout"] = 10 * (1 + len(batch))
                data = url.download(**kwargs)
                data = json.loads(data)
                for x in (data or {}).get("pages", {}).values():
                    yield WikiaArticle(title=x.get("title", ""), source=x.get("html", ""))
                if done:
                    # NOTE(review): raising StopIteration inside a generator is a
                    # RuntimeError under PEP 479 (Python 3.7+); "return" is the portable form.
                    raise StopIteration
        # batch=False: fetch and parse one article per query.
        for title in self.list(**kwargs):
            yield self.search(title, **kwargs)
class WikiaArticle(MediaWikiArticle):

    def __repr__(self):
        # Show the article title for debugging.
        title = repr(self.title)
        return "WikiaArticle(title=%s)" % title
class WikiaSection(MediaWikiSection):

    def __repr__(self):
        # Byte-encode the title so repr() yields a plain (non-unicode) string.
        title = bytestring(self.title)
        return "WikiaSection(title='%s')" % title
class WikiaTable(MediaWikiTable):

    def __repr__(self):
        # Byte-encode the title so repr() yields a plain (non-unicode) string.
        title = bytestring(self.title)
        return "WikiaTable(title='%s')" % title
#--- FLICKR ----------------------------------------------------------------------------------------
# http://www.flickr.com/services/api/
# Flickr REST API endpoint and license key.
FLICKR = "http://api.flickr.com/services/rest/"
FLICKR_LICENSE = api.license["Flickr"]
# Extra sort order for Flickr.search(): by "interestingness".
INTERESTING = "interesting"
class Flickr(SearchEngine):

    def __init__(self, license=None, throttle=5.0, language=None):
        SearchEngine.__init__(self, license or FLICKR_LICENSE, throttle, language)

    def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Flickr for the given query.
            Retrieving the URL of a result (i.e. image) requires an additional query.
            - type : SEARCH, IMAGE,
            - start: maximum undefined,
            - count: maximum 500,
            - sort : RELEVANCY, LATEST or INTERESTING.
            There is no daily limit.
        """
        if type not in (SEARCH, IMAGE):
            raise SearchEngineTypeError
        # NOTE: 500/count is Python 2 integer division (caps start * count at 500).
        if not query or count < 1 or start < 1 or start > 500/count:
            return Results(FLICKR, query, IMAGE)
        # 1) Construct request URL.
        url = FLICKR+"?"
        url = URL(url, method=GET, query={
           "api_key": self.license or "",
            "method": "flickr.photos.search",
              "text": query.replace(" ", "_"),
              "page": start,
          "per_page": min(count, 500),
              "sort": { RELEVANCY: "relevance",
                           LATEST: "date-posted-desc",
                      INTERESTING: "interestingness-desc" }.get(sort)
        })
        if kwargs.get("copyright", True) is False:
            # With copyright=False, only returns Public Domain and Creative Commons images.
            # http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
            # 5: "Attribution-ShareAlike License"
            # 7: "No known copyright restriction"
            url.query["license"] = "5,7"
        # 2) Parse XML response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = xml.dom.minidom.parseString(bytestring(data))
        results = Results(FLICKR, query, IMAGE)
        results.total = int(data.getElementsByTagName("photos")[0].getAttribute("total"))
        for x in data.getElementsByTagName("photo"):
            r = FlickrResult(url=None)
            # Stash the data FlickrResult.url needs for its lazy follow-up query.
            r.__dict__["_id"] = x.getAttribute("id")
            r.__dict__["_size"] = size
            r.__dict__["_license"] = self.license
            r.__dict__["_throttle"] = self.throttle
            r.text = self.format(x.getAttribute("title"))
            r.author = self.format(x.getAttribute("owner"))
            results.append(r)
        return results
class FlickrResult(Result):

    @property
    def url(self):
        # Retrieving the url of a FlickrResult (i.e. image location) requires another query.
        # Note: the "Original" size no longer appears in the response,
        # so Flickr might not like it if we download it.
        url = FLICKR + "?method=flickr.photos.getSizes&photo_id=%s&api_key=%s" % (self._id, self._license)
        data = URL(url).download(throttle=self._throttle, unicode=True)
        data = xml.dom.minidom.parseString(bytestring(data))
        size = { TINY: "Thumbnail",
                SMALL: "Small",
               MEDIUM: "Medium",
                LARGE: "Original" }.get(self._size, "Medium")
        for x in data.getElementsByTagName("size"):
            if size == x.getAttribute("label"):
                return x.getAttribute("source")
            if size == "Original":
                # "Original" is absent from the response: derive its URL from the
                # first listed size by splicing in the "_o" suffix.
                # NOTE(review): this returns on the very first <size> element when
                # size == "Original" — it never reaches later elements.
                url = x.getAttribute("source")
                url = url[:-len(extension(url))-2] + "_o" + extension(url)
                return u(url)
#images = Flickr().search("kitten", count=10, size=SMALL)
#for img in images:
# print bytestring(img.description)
# print img.url
#
#data = img.download()
#f = open("kitten"+extension(img.url), "w")
#f.write(data)
#f.close()
#--- FACEBOOK --------------------------------------------------------------------------------------
# Facebook public status updates.
# https://developers.facebook.com/docs/reference/api/
# Facebook Graph API endpoint and license key.
FACEBOOK = "https://graph.facebook.com/"
FACEBOOK_LICENSE = api.license["Facebook"]
# Facebook.search() types:
FEED = "feed"         # Facebook timeline.
COMMENTS = "comments" # Facebook comments (for a given news feed post).
LIKES = "likes"       # Facebook likes (for a given post or comment).
FRIENDS = "friends"   # Facebook friends (for a given profile id).
class FacebookResult(Result):

    def __repr__(self):
        # Identify the result by its Facebook object id.
        object_id = repr(self.id)
        return "Result(id=%s)" % object_id
class Facebook(SearchEngine):

    def __init__(self, license=None, throttle=1.0, language=None):
        SearchEngine.__init__(self, license, throttle, language)

    @property
    def _token(self):
        # Yields the "application access token" (stored in api.license["Facebook"]).
        # With this license, we can view public content.
        # To view more information, we need a "user access token" as license key.
        # This token can be retrieved manually from:
        # http://www.clips.ua.ac.be/media/pattern-fb.html
        # Or parsed from this URL:
        # https://graph.facebook.com/oauth/authorize?type=user_agent
        #     &client_id=332061826907464
        #     &redirect_uri=http%3A%2F%2Fwww.clips.ua.ac.be/media/pattern-facebook-token.html
        #     &scope=read_stream,user_birthday,user_likes,user_photos,friends_birthday,friends_likes
        # The token is valid for a limited duration.
        return URL(FACEBOOK + "oauth/access_token?", query={
               "grant_type": "client_credentials",
                "client_id": "332061826907464",
            "client_secret": "81ff4204e73ecafcd87635a3a3683fbe"
        }).download().split("=")[1]

    def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
        """ Returns a list of results from Facebook public status updates for the given query.
            - query: string, or Result.id for NEWS and COMMENTS,
            - type : SEARCH,
            - start: 1,
            - count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
            There is an hourly limit of +-600 queries (actual amount undisclosed).
        """
        # Facebook.search(type=SEARCH) returns public posts + author.
        # Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
        # Facebook.search(type=COMMENTS) returns comments for the given post id.
        # Facebook.search(type=LIKES) returns authors for the given author, post or comments.
        # An author is a Facebook user or other entity (e.g., a product page).
        if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
            raise SearchEngineTypeError
        # Per-type result cap (NOTE: "max" shadows the builtin; kept for
        # compatibility with the expressions below).
        if type in (SEARCH, NEWS):
            max = 100
        if type in (COMMENTS, LIKES):
            max = 1000
        if type in (FRIENDS,):
            max = 10000
        if not query or start < 1 or count < 1:
            return Results(FACEBOOK, query, SEARCH)
        if isinstance(query, FacebookResult):
            query = query.id
        # 1) Construct request URL.
        if type == SEARCH:
            url = FACEBOOK + type
            url = URL(url, method=GET, query={
                     "q": query,
                  "type": "post",
                "fields": ",".join(("id", "link", "message", "created_time", "from")),
                "offset": (start-1) * min(count, max),
                 "limit": (start-0) * min(count, max),
            })
        if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
            # "news" is mapped to the Graph API's "feed" connection.
            url = FACEBOOK + (u(query) or "me").replace(FACEBOOK, "") + "/" + type.replace("news", "feed")
            url = URL(url, method=GET, query={
                "access_token": self.license,
                      "offset": (start-1) * min(count, max),
                       "limit": (start-0) * min(count, max)
            })
        # 2) Parse JSON response.
        kwargs.setdefault("cached", cached)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = URL(url).download(**kwargs)
        except HTTP400BadRequest:
            # A bad request usually means an invalid/expired access token.
            raise HTTP401Authentication
        data = json.loads(data)
        results = Results(FACEBOOK, query, SEARCH)
        results.total = None
        for x in data.get("data", []):
            r = FacebookResult(url=None)
            r.id = self.format(x.get("id"))
            r.url = self.format(x.get("link"))
            r.text = self.format(x.get("story", x.get("message")))
            r.date = self.format(x.get("created_time"))
            # Store likes & comments count as int, author as (id, name)-tuple
            # (by default Result will store everything as Unicode strings).
            # dict.__setitem__ bypasses Result's own formatting __setitem__.
            s = lambda r, k, v: dict.__setitem__(r, k, v)
            s(r, "likes", \
                self.format(x.get("like_count", x.get("likes", {}).get("count", 0))) + 0)
            s(r, "comments", \
                self.format(x.get("comments", {}).get("count", 0)) + 0)
            s(r, "author", (
                u(self.format(x.get("from", {}).get("id", ""))), \
                u(self.format(x.get("from", {}).get("name", "")))))
            # Replace Result.text with author name for likes.
            if type in (LIKES, FRIENDS):
                s(r, "author", (
                    u(self.format(x.get("id", ""))),
                    u(self.format(x.get("name", "")))))
                r.text = \
                    self.format(x.get("name"))
            # Replace Result.url Facebook URL with object id.
            if r.url.startswith("http://www.facebook.com/photo"):
                r.url = x.get("picture", r.url)
            # Replace Result.url Facebook URL with full-size image.
            if r.url.startswith("http://www.facebook.com/") and \
               r.url.split("/")[-1].split("?")[0].isdigit():
                r.url = r.url.split("/")[-1].split("?")[0].replace("_s", "_b")
            results.append(r)
        return results

    def profile(self, id=None, **kwargs):
        """ For the given author id or alias,
            returns a (id, name, date of birth, gender, locale)-tuple.
        """
        url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
        url = URL(url, method=GET, query={"access_token": self.license})
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = URL(url).download(**kwargs)
            data = json.loads(data)
        except HTTP400BadRequest:
            raise HTTP401Authentication
        return (
            u(data.get("id", "")),
            u(data.get("name", "")),
            u(data.get("birthday", "")),
            u(data.get("gender", "")[:1]),  # "m" / "f" / "".
            u(data.get("locale", ""))
        )
#license = "" # Generate a license key at: http://www.clips.ua.ac.be/media/pattern-fb.html
#fb = Facebook(license)
#me = fb.profile()[0]
#for r in fb.search(me, type=NEWS, count=10):
# print r.id
# print r.text
# print r.url
# if r.comments > 0:
# print "%s comments:" % r.comments
# print [(r.text, r.author) for r in fb.search(r, type=COMMENTS)]
# if r.likes > 0:
# print "%s likes:" % r.likes
# print [r.author for r in fb.search(r, type=LIKES)]
# print
#--- PRODUCT REVIEWS -------------------------------------------------------------------------------
# Productwiki API endpoint and license key.
PRODUCTWIKI = "http://api.productwiki.com/connect/api.aspx"
PRODUCTWIKI_LICENSE = api.license["Products"]
class Products(SearchEngine):

    def __init__(self, license=None, throttle=5.0, language=None):
        SearchEngine.__init__(self, license or PRODUCTWIKI_LICENSE, throttle, language)

    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Productwiki for the given query.
            Each Result.reviews is a list of (review, score)-items.
            - type : SEARCH,
            - start: maximum undefined,
            - count: 20,
            - sort : RELEVANCY.
            There is no daily limit.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if not query or start < 1 or count < 1:
            return Results(PRODUCTWIKI, query, type)
        # 1) Construct request URL.
        url = PRODUCTWIKI+"?"
        url = URL(url, method=GET, query={
               "key": self.license or "",
                 "q": query,
             "page" : start,
                "op": "search",
            "fields": "proscons", # "description,proscons" is heavy.
            "format": "json"
        })
        # 2) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = URL(url).download(cached=cached, **kwargs)
        data = json.loads(data)
        results = Results(PRODUCTWIKI, query, type)
        results.total = None
        for x in data.get("products", [])[:count]:
            r = Result(url=None)
            # dict.__setitem__ via __dict__ bypasses Result's formatting.
            r.__dict__["title"] = u(x.get("title"))
            r.__dict__["text"] = u(x.get("text"))
            r.__dict__["reviews"] = []
            reviews = x.get("community_review") or {}
            # Robustness fix: a review without a "score" field used to raise
            # TypeError on int(None); "or 0" keeps the +1/-1 fallback behavior.
            for p in reviews.get("pros", []):
                r.reviews.append((p.get("text", ""), int(p.get("score") or 0) or +1))
            for p in reviews.get("cons", []):
                r.reviews.append((p.get("text", ""), int(p.get("score") or 0) or -1))
            r.__dict__["score"] = int(sum(score for review, score in r.reviews))
            results.append(r)
        # Highest score first.
        results.sort(key=lambda r: r.score, reverse=True)
        return results
#for r in Products().search("tablet"):
# print r.title
# print r.score
# print r.reviews
# print
#--- NEWS FEED -------------------------------------------------------------------------------------
# Based on the Universal Feed Parser by Mark Pilgrim:
# http://www.feedparser.org/
class Newsfeed(SearchEngine):

    def __init__(self, license=None, throttle=1.0, language=None):
        SearchEngine.__init__(self, license, throttle, language)

    def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
        """ Returns a list of results from the given RSS or Atom newsfeed URL.
        """
        if type != NEWS:
            raise SearchEngineTypeError
        if not query or start < 1 or count < 1:
            return Results(query, query, NEWS)
        # 1) Construct request URL.
        # 2) Parse RSS/Atom response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        tags = kwargs.pop("tags", [])
        # The query IS the feed URL.
        data = URL(query).download(cached=cached, **kwargs)
        data = feedparser.parse(bytestring(data))
        results = Results(query, query, NEWS)
        results.total = None
        for x in data["entries"][:count]:
            # Prefer full entry content; fall back to the summary.
            s = "\n\n".join([v.get("value") for v in x.get("content", [])]) or x.get("summary")
            r = Result(url=None)
            r.id = self.format(x.get("id"))
            r.url = self.format(x.get("link"))
            r.title = self.format(x.get("title"))
            r.text = self.format(s)
            r.date = self.format(x.get("updated"))
            r.author = self.format(x.get("author"))
            r.language = self.format(x.get("content") and \
                                 x.get("content")[0].get("language") or \
                                              data.get("language"))
            for tag in tags:
                # Parse custom tags.
                # Newsfeed.search(tags=["dc:identifier"]) => Result.dc_identifier.
                tag = tag.replace(":", "_")
                r[tag] = self.format(x.get(tag))
            results.append(r)
        return results
# Some example newsfeeds to try with Newsfeed.search():
feeds = {
          "Nature": "http://feeds.nature.com/nature/rss/current",
         "Science": "http://www.sciencemag.org/rss/podcast.xml",
  "Herald Tribune": "http://www.iht.com/rss/frontpage.xml",
            "TIME": "http://feeds.feedburner.com/time/topstories",
             "CNN": "http://rss.cnn.com/rss/edition.rss",
}
#for r in Newsfeed().search(feeds["Nature"]):
# print r.title
# print r.author
# print r.url
# print plaintext(r.text)
# print
#--- QUERY -----------------------------------------------------------------------------------------
def query(string, service=GOOGLE, **kwargs):
    """ Returns the list of search query results from the given service.
        For service=WIKIPEDIA, this is a single WikipediaArticle or None.
        Raises SearchEngineError for an unknown service name.
    """
    service = service.lower()
    # NOTE(review): some of the constants below (e.g. FACEBOOK) look like URL
    # strings rather than lowercase names; matching then relies on the literal
    # aliases ("facebook", "fb", ...) — confirm against the constant definitions.
    if service in (GOOGLE, "google", "g"):
        engine = Google
    if service in (YAHOO, "yahoo", "y!"):
        engine = Yahoo
    if service in (BING, "bing"):
        engine = Bing
    if service in (TWITTER, "twitter"):
        engine = Twitter
    if service in (FACEBOOK, "facebook", "fb"):
        engine = Facebook
    if service in (WIKIA, "wikia"):
        engine = Wikia
    if service in (WIKIPEDIA, "wikipedia", "wp"):
        engine = Wikipedia
    if service in (FLICKR, "flickr"):
        engine = Flickr
    try:
        # Split off the engine constructor arguments from the search arguments.
        kw = {}
        for a in ("license", "throttle", "language"):
            if a in kwargs:
                kw[a] = kwargs.pop(a)
        # Bug fix: kw must be unpacked as keyword arguments; passing the dict
        # itself (engine(kw)) would be taken as the positional license argument.
        return engine(**kw).search(string, **kwargs)
    except UnboundLocalError:
        # "engine" was never assigned: the service name did not match anything.
        raise SearchEngineError("unknown search engine '%s'" % service)
#--- WEB SORT --------------------------------------------------------------------------------------
# Maps each service name constant to its SearchEngine subclass (used by sort()).
SERVICES = {
    GOOGLE : Google,
     YAHOO : Yahoo,
      BING : Bing,
   TWITTER : Twitter,
 WIKIPEDIA : Wikipedia,
     WIKIA : Wikia,
    FLICKR : Flickr,
  FACEBOOK : Facebook
}
def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, reverse=False, **kwargs):
    """ Returns a list of (percentage, term)-tuples for the given list of terms.
        Sorts the terms in the list according to search result count.
        When a context is defined, sorts according to relevancy to the context, e.g.:
        sort(terms=["black", "green", "red"], context="Darth Vader") =>
        yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
        - terms : list of search terms,
        - context : term used for sorting,
        - service : web service name (GOOGLE, YAHOO, BING),
        - license : web service license id,
        - strict : when True the query constructed from term + context is wrapped in quotes.
    """
    # NOTE: terms=[] is a mutable default; it is only read here, never mutated.
    service = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop("language", None))
    R = []
    for word in terms:
        # With reverse=True the context precedes the term in the query.
        q = reverse and context+" "+word or word+" "+context
        # Bug fix: str.strip() returns a new string; the result was discarded
        # before, so queries with an empty context kept a stray space.
        q = q.strip()
        q = strict and "\"%s\"" % q or q
        r = service.search(q, count=1, **kwargs)
        R.append(r)
    # Normalize total counts to percentages (guard against an all-zero sum).
    s = float(sum([r.total or 1 for r in R])) or 1.0
    R = [((r.total or 1)/s, r.query) for r in R]
    R = sorted(R, reverse=True)
    return R
#print sort(["black", "happy"], "darth vader", GOOGLE)
#### DOCUMENT OBJECT MODEL #########################################################################
# Tree traversal of HTML source code.
# The Document Object Model (DOM) is a cross-platform and language-independent convention
# for representing and interacting with objects in HTML, XHTML and XML documents.
# BeautifulSoup is wrapped in Document, Element and Text classes that resemble the Javascript DOM.
# BeautifulSoup can of course be used directly since it is imported here.
# http://www.crummy.com/software/BeautifulSoup/
# BeautifulSoup classes that Node.__init__() treats as already-parsed input.
SOUP = (
    BeautifulSoup.BeautifulSoup,
    BeautifulSoup.Tag,
    BeautifulSoup.NavigableString,
    BeautifulSoup.Comment
)

# Node.type constants:
NODE, TEXT, COMMENT, ELEMENT, DOCUMENT = \
    "node", "text", "comment", "element", "document"
#--- NODE ------------------------------------------------------------------------------------------
class Node:

    def __init__(self, html, type=NODE, **kwargs):
        """ The base class for Text, Comment and Element.
            All DOM nodes can be navigated in the same way (e.g. Node.parent, Node.children, ...)
        """
        self.type = type
        # Parse raw strings with BeautifulSoup; wrap already-parsed soup objects as-is.
        self._p = not isinstance(html, SOUP) and BeautifulSoup.BeautifulSoup(u(html), **kwargs) or html

    @property
    def _beautifulSoup(self):
        # If you must, access the BeautifulSoup object with Node._beautifulSoup.
        return self._p

    def __eq__(self, other):
        # Two Node objects containing the same BeautifulSoup object, are the same.
        return isinstance(other, Node) and hash(self._p) == hash(other._p)

    def _wrap(self, x):
        # Navigating to other nodes yields either Text, Element or None.
        if isinstance(x, BeautifulSoup.Comment):
            return Comment(x)
        if isinstance(x, BeautifulSoup.Declaration):
            return Text(x)
        if isinstance(x, BeautifulSoup.NavigableString):
            return Text(x)
        if isinstance(x, BeautifulSoup.Tag):
            return Element(x)

    @property
    def parent(self):
        return self._wrap(self._p.parent)

    @property
    def children(self):
        # Leaf soup objects have no "contents" attribute => empty list.
        return hasattr(self._p, "contents") and [self._wrap(x) for x in self._p.contents] or []

    @property
    def html(self):
        return self.__unicode__()

    @property
    def source(self):
        return self.__unicode__()

    @property
    def next_sibling(self):
        return self._wrap(self._p.nextSibling)

    @property
    def previous_sibling(self):
        return self._wrap(self._p.previousSibling)

    # Short aliases:
    next, previous = next_sibling, previous_sibling

    def traverse(self, visit=lambda node: None):
        """ Executes the visit function on this node and each of its child nodes.
        """
        visit(self); [node.traverse(visit) for node in self.children]

    def __len__(self):
        return len(self.children)

    def __iter__(self):
        return iter(self.children)

    def __getitem__(self, index):
        return self.children[index]

    def __repr__(self):
        return "Node(type=%s)" % repr(self.type)

    def __str__(self):
        return bytestring(self.__unicode__())

    def __unicode__(self):
        return u(self._p)
#--- TEXT ------------------------------------------------------------------------------------------
class Text(Node):
    """ Text represents a chunk of text without formatting in a HTML document.
        For example: "the <b>cat</b>" is parsed to [Text("the"), Element("cat")].
    """

    def __init__(self, string):
        Node.__init__(self, string, type=TEXT)

    def __repr__(self):
        return "Text(%s)" % repr(self._p)
class Comment(Text):
    """ Comment represents a comment in the HTML source code.
        For example: "<!-- comment -->".
    """

    def __init__(self, string):
        # Note: deliberately skips Text.__init__ to set type=COMMENT.
        Node.__init__(self, string, type=COMMENT)

    def __repr__(self):
        return "Comment(%s)" % repr(self._p)
#--- ELEMENT ---------------------------------------------------------------------------------------
class Element(Node):

    def __init__(self, html):
        """ Element represents an element or tag in the HTML source code.
            For example: "<b>hello</b>" is a "b"-Element containing a child Text("hello").
        """
        Node.__init__(self, html, type=ELEMENT)

    @property
    def tagname(self):
        return self._p.name
    tag = tagName = tagname

    @property
    def attributes(self):
        # BeautifulSoup 3 internal API: attribute name => value map.
        return self._p._getAttrMap()

    @property
    def id(self):
        return self.attributes.get("id")

    def get_elements_by_tagname(self, v):
        """ Returns a list of nested Elements with the given tag name.
            The tag name can include a class (e.g. div.header) or an id (e.g. div#content).
        """
        if isinstance(v, basestring) and "#" in v:
            v1, v2 = v.split("#")
            # True (from "v1 in (...)") makes findAll() match any tag name.
            v1 = v1 in ("*","") or v1.lower()
            return [Element(x) for x in self._p.findAll(v1, id=v2)]
        if isinstance(v, basestring) and "." in v:
            v1, v2 = v.split(".")
            v1 = v1 in ("*","") or v1.lower()
            return [Element(x) for x in self._p.findAll(v1, v2)]
        return [Element(x) for x in self._p.findAll(v in ("*","") or v.lower())]
    by_tag = getElementsByTagname = get_elements_by_tagname

    def get_element_by_id(self, v):
        """ Returns the first nested Element with the given id attribute value.
        """
        # Appending [None] yields None instead of IndexError when there is no match.
        return ([Element(x) for x in self._p.findAll(id=v, limit=1) or []]+[None])[0]
    by_id = getElementById = get_element_by_id

    def get_elements_by_classname(self, v):
        """ Returns a list of nested Elements with the given class attribute value.
        """
        return [Element(x) for x in (self._p.findAll(True, v))]
    by_class = getElementsByClassname = get_elements_by_classname

    def get_elements_by_attribute(self, **kwargs):
        """ Returns a list of nested Elements with the given attribute value.
        """
        return [Element(x) for x in (self._p.findAll(True, attrs=kwargs))]
    by_attribute = getElementsByAttribute = get_elements_by_attribute

    @property
    def content(self):
        """ Yields the element content as a unicode string.
        """
        return u"".join([u(x) for x in self._p.contents])

    @property
    def source(self):
        """ Yields the HTML source as a unicode string (tag + content).
        """
        return u(self._p)
    html = source

    def __getattr__(self, k):
        # Unknown Python attributes fall back to HTML attribute lookup,
        # e.g. element.href.
        if k in self.__dict__:
            return self.__dict__[k]
        if k in self.attributes:
            return self.attributes[k]
        raise AttributeError, "'Element' object has no attribute '%s'" % k

    def __repr__(self):
        return "Element(tag='%s')" % bytestring(self.tagname)
#--- DOCUMENT --------------------------------------------------------------------------------------
class Document(Element):

    def __init__(self, html, **kwargs):
        """ Document is the top-level element in the Document Object Model.
            It contains nested Element, Text and Comment nodes.
        """
        # Aliases for BeautifulSoup optional parameters:
        kwargs["selfClosingTags"] = kwargs.pop("self_closing", kwargs.get("selfClosingTags"))
        Node.__init__(self, u(html).strip(), type=DOCUMENT, **kwargs)

    @property
    def declaration(self):
        """ Yields the <!doctype> declaration, as a TEXT Node or None.
        """
        for child in self.children:
            if isinstance(child._p, BeautifulSoup.Declaration):
                return child

    @property
    def head(self):
        return self._wrap(self._p.head)

    @property
    def body(self):
        return self._wrap(self._p.body)

    @property
    def tagname(self):
        # The document itself has no tag.
        return None
    tag = tagname

    def __repr__(self):
        return "Document()"

# Convenience alias: DOM(html) == Document(html).
DOM = Document
#article = Wikipedia().search("Document Object Model")
#dom = DOM(article.html)
#print dom.get_element_by_id("References").source
#print [element.attributes["href"] for element in dom.get_elements_by_tagname("a")]
#print dom.get_elements_by_tagname("p")[0].next.previous.children[0].parent.__class__
#print
#### WEB CRAWLER ###################################################################################
# Tested with a crawl across 1,000 domains so far.
class Link:

    def __init__(self, url, text="", relation="", referrer=""):
        """ A hyperlink parsed from a HTML document, in the form:
            <a href="url"", title="text", rel="relation">xxx</a>.
        """
        self.url = u(url)
        self.text = u(text)
        self.relation = u(relation)
        self.referrer = u(referrer)

    @property
    def description(self):
        return self.text

    def __repr__(self):
        return "Link(url=%s)" % repr(self.url)

    # Used for sorting in Spider.links:
    # links compare by URL only.
    def __eq__(self, link):
        return self.url == link.url

    def __ne__(self, link):
        return not self.url == link.url

    def __lt__(self, link):
        return self.url < link.url

    def __gt__(self, link):
        return link.url < self.url
class HTMLLinkParser(HTMLParser):

    def __init__(self):
        HTMLParser.__init__(self)

    def parse(self, html, url=""):
        """ Returns a list of Links parsed from the given HTML string.
        """
        if html is None:
            return None
        self._url = url      # Referrer recorded on each parsed Link.
        self._data = []      # Collected Link objects.
        # self.clean() is presumably defined on the HTMLParser base class — confirm.
        self.feed(self.clean(html))
        self.close()
        self.reset()
        return self._data

    def handle_starttag(self, tag, attributes):
        # Callback from HTMLParser.feed(): collect <a href=...> tags.
        if tag == "a":
            attributes = dict(attributes)
            if "href" in attributes:
                link = Link(url = attributes.get("href"),
                           text = attributes.get("title"),
                       relation = attributes.get("rel", ""),
                       referrer = self._url)
                self._data.append(link)
def base(url):
    """ Returns the URL domain name:
        http://en.wikipedia.org/wiki/Web_crawler => en.wikipedia.org
    """
    parsed = urlparse.urlparse(url)
    return parsed.netloc
def abs(url, base=None):
    """ Returns the absolute URL:
        ../media + http://en.wikipedia.org/wiki/ => http://en.wikipedia.org/media
    """
    # NOTE: shadows the builtin abs() inside this module.
    if url.startswith("#") and base is not None and not base.endswith("/"):
        # Anchor on a bare domain: give the base a trailing slash first.
        if not re.search("[^/]/[^/]", base):
            base += "/"
    return urlparse.urljoin(base, url)
# Crawl strategies for Spider.priority():
DEPTH = "depth"
BREADTH = "breadth"
# Queue ordering for links of equal priority:
FIFO = "fifo" # First In, First Out.
FILO = "filo" # First In, Last Out.
LIFO = "lifo" # Last In, First Out (= FILO).
class Spider:

    def __init__(self, links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO):
        """ A spider can be used to browse the web in an automated manner.
            It visits the list of starting URLs, parses links from their content, visits those, etc.
            - Links can be prioritized by overriding Spider.priority().
            - Links can be ignored by overriding Spider.follow().
            - Each visited link is passed to Spider.visit(), which can be overridden.
        """
        # NOTE(review): links=[] and domains=[] are mutable defaults; they are
        # only read here, but mutating self.domains would affect all instances.
        self.parse = parser
        self.delay = delay      # Delay between visits to the same (sub)domain.
        self.domains = domains  # Domains the spider is allowed to visit.
        self.history = {}       # Domain name => time last visited.
        self.visited = {}       # URLs visited.
        self._queue = []        # URLs scheduled for a visit: (priority, time, Link).
        self._queued = {}       # URLs scheduled so far, lookup dictionary.
        self.QUEUE = 10000      # Increase or decrease according to available memory.
        self.sort = sort
        # Queue given links in given order:
        for link in (isinstance(links, basestring) and [links] or links):
            self.push(link, priority=1.0, sort=FIFO)

    @property
    def done(self):
        """ Yields True if no further links are scheduled to visit.
        """
        return len(self._queue) == 0

    def push(self, link, priority=1.0, sort=FILO):
        """ Pushes the given link to the queue.
            Position in the queue is determined by priority.
            Equal ranks are sorted FIFO or FILO.
            With priority=1.0 and FILO, the link is inserted to the queue.
            With priority=0.0 and FIFO, the link is appended to the queue.
        """
        if not isinstance(link, Link):
            link = Link(url=link)
        dt = time.time()
        # FIFO: earlier timestamps sort first; FILO: 1/t inverts the order
        # so later pushes sort first among equal priorities.
        dt = sort == FIFO and dt or 1 / dt
        # Lower tuple => higher priority (bisect keeps the queue sorted).
        bisect.insort(self._queue, (1 - priority, dt, link))
        self._queued[link.url] = True

    def pop(self, remove=True):
        """ Returns the next Link queued to visit and removes it from the queue.
            Links on a recently visited (sub)domain are skipped until Spider.delay has elapsed.
        """
        now = time.time()
        for i, (priority, dt, link) in enumerate(self._queue):
            if self.delay <= now - self.history.get(base(link.url), 0):
                if remove is True:
                    self._queue.pop(i)
                    self._queued.pop(link.url, None)
                return link
        # Implicitly returns None when every queued domain is still cooling down.

    @property
    def next(self):
        """ Returns the next Link queued to visit (without removing it).
        """
        return self.pop(remove=False)

    def crawl(self, method=DEPTH, **kwargs):
        """ Visits the next link in Spider._queue.
            If the link is on a domain recently visited (< Spider.delay) it is skipped.
            Parses the content at the link for new links and adds them to the queue,
            according to their Spider.priority().
            Visited links (and content) are passed to Spider.visit().
            Returns True if a link was visited, False otherwise.
        """
        link = self.pop()
        if link is None:
            return False
        if link.url not in self.visited:
            t = time.time()
            url = URL(link.url)
            if url.mimetype == "text/html":
                try:
                    kwargs.setdefault("unicode", True)
                    html = url.download(**kwargs)
                    for new in self.parse(html, url=link.url):
                        # Resolve relative URLs against the (possibly redirected) page URL.
                        new.url = abs(new.url, base=url.redirect or link.url)
                        new.url = self.normalize(new.url)
                        # 1) Parse new links from HTML web pages.
                        # 2) Schedule unknown links for a visit.
                        # 3) Only links that are not already queued are queued.
                        # 4) Only links for which Spider.follow() is True are queued.
                        # 5) Only links on Spider.domains are queued.
                        if new.url in self.visited:
                            continue
                        if new.url in self._queued:
                            continue
                        if self.follow(new) is False:
                            continue
                        if self.domains and not base(new.url).endswith(tuple(self.domains)):
                            continue
                        # 6) Limit the queue (remove tail), unless you are Google.
                        if self.QUEUE is not None and \
                           self.QUEUE * 1.25 < len(self._queue):
                            self._queue = self._queue[:self.QUEUE]
                            # Rebuild the lookup dict to match the truncated queue.
                            self._queued.clear()
                            self._queued.update(dict((q[2].url, True) for q in self._queue))
                        # 7) Position in the queue is determined by Spider.priority().
                        # 8) Equal ranks are sorted FIFO or FILO.
                        self.push(new, priority=self.priority(new, method=method), sort=self.sort)
                    self.visit(link, source=html)
                except URLError:
                    # URL can not be reached (HTTP404NotFound, URLTimeout).
                    self.fail(link)
            else:
                # URL MIME-type is not HTML, don't know how to handle.
                self.fail(link)
            # Log the current time visited for the domain (see Spider.pop()).
            # Log the URL as visited.
            self.history[base(link.url)] = time.time()
            self.visited[link.url] = True
            return True
        # Nothing happened, we already visited this link.
        return False

    def normalize(self, url):
        """ Called from Spider.crawl() to normalize URLs.
            For example: return url.split("?")[0]
        """
        # All links pass through here (visited or not).
        # This can be a place to count backlinks.
        return url

    def follow(self, link):
        """ Called from Spider.crawl() to determine if it should follow this link.
            For example: return "nofollow" not in link.relation
        """
        return True

    def priority(self, link, method=DEPTH):
        """ Called from Spider.crawl() to determine the priority of this link,
            as a number between 0.0-1.0. Links with higher priority are visited first.
        """
        # Depth-first search dislikes external links to other (sub)domains.
        external = base(link.url) != base(link.referrer)
        if external is True:
            if method == DEPTH:
                return 0.75
            if method == BREADTH:
                return 0.85
        return 0.80

    def visit(self, link, source=None):
        """ Called from Spider.crawl() when the link is crawled.
            When source=None, the link is not a web page (and was not parsed),
            or possibly a URLTimeout occurred (content size too big).
        """
        pass

    def fail(self, link):
        """ Called from Spider.crawl() for link whose MIME-type could not be determined,
            or which raised a URLError on download.
        """
        pass
#class Spiderling(Spider):
#    def visit(self, link, source=None):
#        print "visited:", link.url, "from:", link.referrer
#    def fail(self, link):
#        print "failed:", link.url
#
#s = Spiderling(links=["http://nodebox.net/"], domains=["nodebox.net"], delay=5)
#while not s.done:
#    s.crawl(method=DEPTH, cached=True, throttle=5)

#--- CRAWL FUNCTION --------------------------------------------------------------------------------
# Functional approach to crawling.

# "Crawler" is a public alias of Spider, used by the crawl() generator below.
Crawler = Spider
def crawl(links=None, domains=None, delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO, method=DEPTH, **kwargs):
    """ Returns a generator that yields (Link, source)-tuples of visited pages.
        When the crawler is busy, it yields (None, None).
        When the crawler is done, it yields None.
    """
    # The scenarios below defines "busy":
    # - crawl(delay=10, throttle=0)
    #   The crawler will wait 10 seconds before visiting the same subdomain.
    #   The crawler will not throttle downloads, so the next link is visited instantly.
    #   So sometimes (None, None) is returned while it waits for an available subdomain.
    # - crawl(delay=0, throttle=10)
    #   The crawler will halt 10 seconds after each visit.
    #   The crawler will not delay before visiting the same subdomain.
    #   So usually a result is returned each crawl.next(), but each call takes 10 seconds.
    # - asynchronous(crawl().next)
    #   AsynchronousRequest.value is set to (Link, source) once AsynchronousRequest.done=True.
    #   The program will not halt in the meantime (i.e., the next crawl is threaded).
    # Bug fix: the defaults used to be mutable lists ([]) created once at
    # definition time and shared (and possibly mutated by the crawler) across
    # successive calls; use None sentinels instead.
    if links is None:
        links = []
    if domains is None:
        domains = []
    crawler = Crawler(links, domains, delay, parser, sort)
    # Define Crawler.visit() on-the-fly: stash each crawled page on the crawler
    # so the loop below can yield it.
    bind(crawler, "visit", \
        lambda crawler, link, source=None: \
            setattr(crawler, "crawled", (link, source)))
    while not crawler.done:
        crawler.crawled = (None, None)
        crawler.crawl(method, **kwargs)
        yield crawler.crawled
#for link, source in crawl("http://www.nodebox.net/", delay=0, throttle=10):
# print link
#g = crawl("http://www.nodebox.net/")
#for i in range(10):
# p = asynchronous(g.next)
# while not p.done:
# print "zzz..."
# time.sleep(0.1)
# link, source = p.value
# print link
#### PDF PARSER ####################################################################################
# Yusuke Shinyama, PDFMiner, http://www.unixuser.org/~euske/python/pdfminer/
class PDFParseError(Exception):
    """Raised when plaintext can not be extracted from PDF data."""
class PDF:
    """Plaintext (or rough HTML) extracted from PDF data with PDFMiner."""

    def __init__(self, data, format=None):
        """ Plaintext parsed from the given PDF data.
        """
        # data: a PDF file path or a raw PDF string (see _parse()).
        # format: "html" to preserve some layout information.
        self.content = self._parse(data, format)

    @property
    def string(self):
        # Convenience alias for the parsed text.
        return self.content

    def __unicode__(self):
        # Python 2 unicode() conversion hook.
        return self.content

    def _parse(self, data, format=None):
        # The output will be ugly: it may be useful for mining but probably not for displaying.
        # You can also try PDF(data, format="html") to preserve some layout information.
        from pdf.pdfinterp import PDFResourceManager, process_pdf
        from pdf.converter import TextConverter, HTMLConverter
        from pdf.layout import LAParams
        s = ""
        m = PDFResourceManager()
        try:
            # Given data is a PDF file path.
            # NOTE(review): the file handle opened here is never explicitly
            # closed — confirm whether that matters for the callers.
            data = os.path.exists(data) and open(data) or StringIO.StringIO(data)
        except TypeError:
            # Given data is a PDF string.
            data = StringIO.StringIO(data)
        try:
            stream = StringIO.StringIO()
            # Pick the converter matching the requested output format.
            parser = format=="html" and HTMLConverter or TextConverter
            parser = parser(m, stream, codec="utf-8", laparams=LAParams())
            process_pdf(m, parser, data, set(), maxpages=0, password="")
        except Exception, e:
            # Python 2 syntax: any PDFMiner failure is re-raised as PDFParseError.
            raise PDFParseError, str(e)
        s = stream.getvalue()
        s = decode_utf8(s)
        s = s.strip()
        s = re.sub(r"([a-z])\-\n", "\\1", s)          # Join hyphenated words.
        s = s.replace("\n\n", "<!-- paragraph -->")   # Preserve paragraph spacing.
        s = s.replace("\n", " ")
        s = s.replace("<!-- paragraph -->", "\n\n")
        s = collapse_spaces(s)
        return s
| decebel/dataAtom_alpha | bin/plug/py/external/pattern/web/__init__.py | Python | apache-2.0 | 115,157 | [
"VisIt"
] | 1053a318d34d19587cab39fc03032149511425bb8547a284dcd77c3b67c9e640 |
from dnfpy.cellular.hardlib import HardLib
from dnfpy.core.map2D import Map2D
from dnfpy.cellular.nSpikeConvolution import normalizeIntensity,normalizeProba
import numpy as np
import random
import sys
class Rsdnf2LayerConvolution(Map2D):
    """Two-layer RSDNF lateral convolution computed by the cellular hardware
    simulation library (HardLib, cell model "cellrsdnf2" wired by the
    "rsdnfconnecter2layer" connecter).

    Children needed: "activation" with map of 0 and 1.
    """

    class Params:
        # Parameter indices of the C cell model; must stay in sync with:
        #enum CellRsdnf_Params {NB_SPIKE=0,PROBA=1,PRECISION_PROBA=2};
        #enum CellRsdnf2_Params {PROBA_INH=3,PRECISION_RANDOM=4,NB_BIT_RANDOM=5,SHIFT=6};
        NB_SPIKE=0
        PROBA=1
        PRECISION_PROBA=2
        PROBA_INH=3
        PRECISION_RANDOM=4
        NB_BIT_RANDOM=5
        SHIFT=6

    class Reg:
        # Register indices of the C cell model.
        ACTIVATED=0            # input: 0/1 activation of the cell
        NB_BIT_RECEIVED=1      # output: counts weighted by iExc_ in _compute2()
        NB_BIT_INH_RECEIVED=2  # output: counts weighted by -iInh_ in _compute2()

    class SubReg:
        # Sub-state indices (per-cell arrays of depth 8, see resetLat()).
        BUFFER = 0
        SPIKE_OUT = 1

    """
    Children needed: "activation" with map of 0 and 1
    """
    def __init__(self,name,size,dt=0.1,nspike=20,precisionProba=31,
                 iExc=1.25,iInh=0.7,pExc=0.0043,pInh=0.4,alpha=10,
                 iExc_=1.,iInh_=1.,pInh_=0.,pExc_=0.,reproductible=True,
                 nstep=1,shift=4,clkRatio = 50,
                 **kwargs):
        # size x size grid of "cellrsdnf2" cells in the hardware simulator.
        self.lib = HardLib(size,size,"cellrsdnf2","rsdnfconnecter2layer")
        super().__init__(name,size,dt=dt,
                         nspike=nspike,
                         precisionProba=precisionProba,nstep=nstep,
                         iExc=iExc,iInh=iInh,pExc=pExc,pInh=pInh,alpha=alpha,
                         iExc_=iExc_,iInh_=iInh_,pInh_=pInh_,pExc_=pExc_,
                         reproductible=reproductible,
                         shift=shift,clkRatio=clkRatio,baseDt=dt,
                         **kwargs)
        self.newActivation = True #true when we want to get the new activation
        # Spike counters copied back from the hardware registers each step.
        self.excMap = np.zeros((size,size),dtype=np.intc)
        self.inhMap = np.zeros((size,size),dtype=np.intc)

    def _compute(self,activation,iInh_,iExc_):
        # Thin delegation; the actual work happens in _compute2().
        self._compute2(activation,iInh_,iExc_)

    def _compute2(self,activation,iInh_,iExc_):
        # Push the activation map to the grid only when flagged as new
        # (flag is set again by reset()/resetLat()).
        if self.newActivation:
            self.newActivation = False
            self.setActivation(activation)
        self.lib.preCompute()
        self.lib.step()
        # Read back received-spike counters and combine them into the lateral
        # contribution: excitation minus inhibition.
        self.lib.getRegArray(self.Reg.NB_BIT_RECEIVED,self.excMap)
        self.lib.getRegArray(self.Reg.NB_BIT_INH_RECEIVED,self.inhMap)
        self._data = self.excMap * iExc_ - self.inhMap * iInh_

    def setActivation(self,activation):
        # Write the 0/1 activation map into every cell's ACTIVATED register.
        self.lib.setRegArray(self.Reg.ACTIVATED,activation)
        self.lib.synch()

    def reset(self):
        # Reset the hardware grid first, then the Map2D state, then rebuild
        # the local spike-counter buffers.
        if self.lib:
            self.lib.reset()
        super().reset()
        size = self.getArg('size')
        self.newActivation = True #true when we want to get the new activation
        self.excMap = np.zeros((size,size),dtype=np.intc)
        self.inhMap = np.zeros((size,size),dtype=np.intc)

    def resetLat(self):
        """
        Reset the NB_BIT_RECEIVED attribute of the map cells
        whenever the neuron potential is updated by reading
        self._data, the resetData method should be called

        In a fully pipelined BsRSDNF, the neuron potential
        is updated on every bit reception, the resetData is the called
        at every computation
        """
        size = self.getArg('size')
        # Zero both received-spike registers...
        self.lib.setRegArray(self.Reg.NB_BIT_RECEIVED, \
                             np.zeros((size,size),dtype=np.intc))
        self.lib.setRegArray(self.Reg.NB_BIT_INH_RECEIVED, \
                             np.zeros((size,size),dtype=np.intc))
        #reset buffer
        # ...and the 8-deep per-cell buffers and spike outputs.
        zeros = np.zeros((size,size,8),dtype=np.intc)
        self.lib.setArraySubState(self.SubReg.BUFFER,zeros)
        self.lib.setArraySubState(self.SubReg.SPIKE_OUT,zeros)
        self.lib.synch()
        self.newActivation=True

    def _onParamsUpdate(self,size,alpha,nspike,iExc,iInh,pExc,pInh,
                        precisionProba,reproductible,shift,clkRatio):
        # Normalize user-level probabilities and intensities into the values
        # expected by the hardware model.
        pExc_ = normalizeProba(pExc,size)
        pInh_ = normalizeProba(pInh,size)
        iExc_ = normalizeIntensity(iExc,size,alpha,nspike)
        iInh_ = normalizeIntensity(iInh,size,alpha,nspike)
        # print("size : %s"%size)
        # print("pExc %s, pInh %s, iExc %s, iInh %s"%(pExc,pInh,iExc,iInh))
        # print("pExc_ %s, pInh_ %s, iExc_ %s, iInh_ %s"%(pExc_,pInh_,iExc_,iInh_))
        self.lib.setMapParam(self.Params.NB_SPIKE,nspike)
        self.lib.setMapParam(self.Params.PROBA,pExc_)
        self.lib.setMapParam(self.Params.PROBA_INH,pInh_)
        # 2**precisionProba - 1: presumably the maximum value of the
        # precisionProba-bit hardware RNG — confirm in the C model.
        self.lib.setMapParam(self.Params.PRECISION_PROBA,2**precisionProba-1)
        self.lib.setMapParam(self.Params.PRECISION_RANDOM,2**precisionProba-1)
        self.lib.setMapParam(self.Params.NB_BIT_RANDOM,precisionProba)
        self.lib.setMapParam(self.Params.SHIFT,shift)
        if reproductible:
            # Fixed seed => deterministic simulation runs.
            self.lib.initSeed(255)
        else:
            seed = random.randint(1, 10000000)
            self.lib.initSeed(seed)
        # The effective time step shrinks with the clock ratio.
        newDt = self.getArg('baseDt') / clkRatio
        self.setArg(dt=newDt)
        return dict(pExc_=pExc_,pInh_=pInh_,iExc_=iExc_,iInh_=iInh_)
if __name__ == "__main__":
    # Smoke test: activate a 10x10 block of cells with deterministic spike
    # transmission and check the total number of excitatory spikes received.
    size = 100
    activation = np.zeros( ( size,size),np.bool_)
    uut = Rsdnf2LayerConvolution("uut",size,activation=activation)
    uut.reset()
    activation[size//2,size//2] = 1
    # pExc=pInh=1: spikes propagate with probability 1 (lossless).
    uut.setParams(pExc=1,pInh=1,nspike=20)
    activation[size//2-5:size//2+5,size//2-5:size//2+5] = 1
    uut.setParams(nspike=20)
    # Run long enough for the spike wave to cross the whole map.
    for i in range(100*20 + 200):
        uut.compute()
    data = uut.excMap
    # Expected total: 100x100 cells each receiving 100 (activated cells) * 20
    # (nspike) spikes, minus the activated cells' own 100*20 spikes.
    # NOTE(review): assumes lossless propagation and no edge losses — confirm.
    assert(np.sum(data)==100*100*100*20 - 100*20)
| bchappet/dnfpy | src/dnfpy/cellular/rsdnf2LayerConvolution.py | Python | gpl-2.0 | 5,576 | [
"NEURON"
] | 955a848a56a1ac255a3654018963a7bc28c37c2b645efe237f49b08e35cef50c |
#!/usr/bin/env python
# Python example script that uses the vtkMatlabEngineInterface to perform
# a calculation (sin(x)^2 + cos(x)^2 = 1) on VTK array data, and pass the
# result back to VTK.
# VTK must be built with VTK_USE_MATLAB_MEX turned on for this example to work!
from __future__ import print_function
from vtk import *
import math
if __name__ == "__main__":
    # Create an instance of the Matlab Engine.
    # Note: this is not a VTK pipeline object.
    engine = vtkMatlabEngineInterface()

    # Build two VTK arrays of doubles: xs holds 0..99, ys holds sin(x)^2.
    xs = vtkDoubleArray()
    ys = vtkDoubleArray()
    for value in range(0, 100):
        xs.InsertNextValue(value)
        ys.InsertNextValue(math.sin(value)**2)

    # Copy both arrays into Matlab under the same variable names.
    engine.PutVtkDataArray("x", xs)
    engine.PutVtkDataArray("y", ys)

    # Compute sin(x)^2 + cos(x)^2 = 1 on the Matlab side.
    engine.EvalString("y = y + cos(x).^2")

    # Copy y back to VTK; every entry should now equal 1.
    result = engine.GetVtkDataArray("y")

    print("\n\nContents of result array copied to VTK from Matlab\n\n")
    for i in range(result.GetNumberOfTuples()):
        print('result[%d] = %6.4f' % (i, result.GetTuple1(i)))
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Infovis/Python/Matlab_engine_interface.py | Python | gpl-3.0 | 1,277 | [
"VTK"
] | b857d2a54e38263c7fd53aba1f6a012ce856a2fb72926ccbb9327b9848241535 |
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import sys
import itertools
import xml.etree.ElementTree as Xml
import math
# Append mugqic_pipelines directory to Python library path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))))
# MUGQIC Modules
from bfx.readset import *
from bfx import bvatools
from bfx import picard
from pipelines import common
log = logging.getLogger(__name__)
class RunInfoRead(object):
    """One read segment of an Illumina run, as described in RunInfo.xml.

    A read has a 1-based number, a number of sequencing cycles and a flag
    telling whether it is an index (barcode) read.
    """

    def __init__(self, number, nb_cycles, is_index):
        self._read_number = number
        self._cycle_count = nb_cycles
        self._index_flag = is_index

    @property
    def number(self):
        """Position of the read on the sequencer (as given at construction)."""
        return self._read_number

    @property
    def nb_cycles(self):
        """Number of sequencing cycles in the read."""
        return self._cycle_count

    @property
    def is_index(self):
        """Whether this read is an index (barcode) read."""
        return self._index_flag
class IlluminaRunProcessing(common.MUGQICPipeline):
"""
Illumina Run Processing Pipeline
================================
The standard MUGQIC Illumina Run Processing pipeline uses the Illumina bcl2fastq
software to convert and demultiplex the base call files to fastq files. The
pipeline runs some QCs on the raw data, on the fastq and on the alignment.
Sample Sheets
-------------
The pipeline uses two input sample sheets. The first one is the standard Casava
sheet, a csv file having the following columns (please refer to the Illumina
Casava user guide):
- `SampleID`
- `FCID`
- `SampleRef`
- `Index`
- `Description`
- `Control`
- `Recipe`
- `Operator`
- `SampleProject`
Example:
FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject
H84WNADXX,1,sample1_MPS0001,,TAAGGCGA-AGAGTAGA,,N,,,nanuq
H84WNADXX,1,sample47_MPS0047,,GTAGAGGA-CTAAGCCT,,N,,,nanuq
The second sample sheet is called the Nanuq run sheet. It's a csv file with the
following minimal set of mandatory columns (the column order in the file doesn't
matter)
- `ProcessingSheetId` Must be the same as the `SampleID` from the Casava Sheet.
- `Name` The sample name put in RG headers of bam files and on filename on disk.
- `Run` The run number.
- `Region` The lane number.
- `Library Barcode` The library barcode put in .bam's RG headers and on disk
- `Library Source` The type of library. If this value contains `RNA` or `cDNA`,
`STAR` will be used to make the aligmnent, otherwise, `bwa_mem` will be used
- `Library Type` Used to determine is the sample is from cDNA/RNA when the
`Library Source` is `Library`
- `BED Files` The name of the BED file containing the genomic targets. This is
the `filename` parameter passed to the `fetch_bed_file_command`
- `Genomic Database` The reference used to make the alignment and calculate alignment metrics
Example:
Name,Genomic Database,Library Barcode,Library Source,Library Type,Run,Region,BED Files,ProcessingSheetId
sample1,Rattus_norvegicus:Rnor_5.0,MPS0001,RNA,Nextera XT,1419,1,toto.bed,sample1_MPS0001
sample47,,MPS1047,Library,Nextera XT,1419,2,toto.bed,sample47_MPS1047
"""
def __init__(self):
    """Register the run-processing command line arguments, then delegate to
    the base pipeline constructor.
    """
    # Inputs of jobs whose outputs must be transferred by the "copy" step.
    self.copy_job_inputs = []
    self.argparser.add_argument("-d", "--run", help="run directory", required=False, dest="run_dir")
    self.argparser.add_argument("--lane", help="lane number", type=int, required=False, dest="lane_number")
    # NOTE(review): "file" is the Python 2 builtin; under Python 3 these two
    # options would need argparse.FileType().
    self.argparser.add_argument("-r", "--readsets", help="nanuq readset file. The default file is 'run.nanuq.csv' in the output folder. Will be automatically downloaded if not present.", type=file, required=False)
    self.argparser.add_argument("-i", help="illumina casava sheet. The default file is 'SampleSheet.nanuq.csv' in the output folder. Will be automatically downloaded if not present", type=file, required=False,
                                dest="casava_sheet_file")
    self.argparser.add_argument("-x", help="first index base to use for demultiplexing (inclusive). The index from the sample sheet will be adjusted according to that value.", type=int, required=False,
                                dest="first_index")
    self.argparser.add_argument("-y", help="last index base to use for demultiplexing (inclusive)", type=int, required=False,
                                dest="last_index")
    # Help-text typo fixed: "mistmaches" -> "mismatches".
    self.argparser.add_argument("-m", help="number of index mismatches allowed for demultiplexing (default 1). Barcode collisions are always checked.", type=int,
                                required=False, dest="number_of_mismatches")
    # Help-text typo fixed: "samples sheets" -> "sample sheets".
    self.argparser.add_argument("-w", "--force-download",
                                help="force the download of the sample sheets (default: false)",
                                action="store_true",
                                dest="force_download")
    super(IlluminaRunProcessing, self).__init__()
@property
def readsets(self):
    """Readsets of the lane, lazily loaded from the nanuq readset file.

    The Illumina lane sample sheet is (re)generated on first access.
    """
    if not hasattr(self, "_readsets"):
        self._readsets = self.load_readsets()
        self.generate_illumina_lane_sample_sheet()
    return self._readsets
@property
def is_paired_end(self):
    """True when the run has more than one template (non-index) read."""
    if not hasattr(self, "_is_paired_end"):
        template_reads = [r for r in self.read_infos if not r.is_index]
        self._is_paired_end = len(template_reads) > 1
    return self._is_paired_end
@property
def run_id(self):
    """The run id parsed from the run folder name (cached).

    Supports both default folder name configuration and GQ's globally
    unique name convention.

    Raises:
        Exception: when the folder name matches neither convention.
    """
    if not hasattr(self, "_run_id"):
        if re.search(".*_\d+HS\d\d[AB]", self.run_dir):
            m = re.search(".*/(\d+_[^_]+_\d+_[^_]+_(\d+)HS.+)", self.run_dir)
            self._run_id = m.group(2)
        elif re.search(".*\d+_[^_]+_\d+_.+", self.run_dir):
            m = re.search(".*/(\d+_([^_]+_\d+)_.*)", self.run_dir)
            self._run_id = m.group(2)
        else:
            log.warn("Unsupported folder name: " + self.run_dir)
            # Bug fix: the original only logged a warning and then crashed
            # with an AttributeError on the return; raise a clear error.
            raise Exception("Error: unsupported folder name: " + self.run_dir)
    return self._run_id
@property
def run_dir(self):
    """Path of the run folder (the mandatory '-d/--run' option)."""
    if not self.args.run_dir:
        raise Exception("Error: missing '-d/--run' option!")
    return self.args.run_dir
@property
def lane_number(self):
    """Lane to process (the mandatory '--lane' option)."""
    if not self.args.lane_number:
        raise Exception("Error: missing '--lane' option!")
    return self.args.lane_number
@property
def casava_sheet_file(self):
    """Path of the Casava sample sheet; defaults to 'SampleSheet.nanuq.csv'
    in the output folder when the '-i' option is not given.
    """
    if self.args.casava_sheet_file:
        return self.args.casava_sheet_file.name
    return self.output_dir + os.sep + "SampleSheet.nanuq.csv"
@property
def nanuq_readset_file(self):
    """Path of the nanuq readset file; defaults to 'run.nanuq.csv' in the
    output folder when the '-r' option is not given.
    """
    if self.args.readsets:
        return self.args.readsets.name
    return self.output_dir + os.sep + "run.nanuq.csv"
@property
def number_of_mismatches(self):
    """Allowed index mismatches for demultiplexing ('-m' option, default 1)."""
    # 0 is a legitimate user value, hence the explicit None comparison.
    if self.args.number_of_mismatches is None:
        return 1
    return self.args.number_of_mismatches
@property
def first_index(self):
    """First index base used for demultiplexing ('-x' option, default 1)."""
    return self.args.first_index or 1
@property
def last_index(self):
    """Last index base used for demultiplexing ('-y' option, default 999)."""
    return self.args.last_index or 999
@property
def mask(self):
    """The base mask used for demultiplexing, computed once and cached."""
    try:
        return self._mask
    except AttributeError:
        self._mask = self.get_mask()
        return self._mask
@property
def steps(self):
    """Ordered list of the pipeline steps (bound methods), as executed."""
    return [
        self.index,
        self.fastq,
        self.align,
        self.picard_mark_duplicates,
        self.metrics,
        self.blast,
        self.qc_graphs,
        self.md5,
        self.copy,
        self.end_copy_notification
    ]
@property
def read_infos(self):
    """RunInfoRead objects parsed from the run's RunInfo.xml file (cached)."""
    try:
        return self._read_infos
    except AttributeError:
        self._read_infos = self.parse_run_info_file()
        return self._read_infos
def index(self):
    """
    Generate a file with all the indexes found in the index-reads of the run.

    The input barcode file is a two columns tsv file. Each line has a
    `barcode_sequence` and the corresponding `barcode_name`. This file can be
    generated by a LIMS.

    The output is a tsv file named `RUNFOLDER_LANENUMBER.metrics` that will be
    saved in the output directory. This file has four columns, the barcode/index
    sequence, the index name, the number of reads and the number of reads that have
    passed the filter.
    """
    jobs = []

    # Build the read structure for the index counter: each template read
    # contributes "<cycles>T" and the first index read contributes a single
    # "<index_length>B" block, after which the loop stops.
    # NOTE(review): reads after the first index read are ignored here —
    # presumably get_sequencer_index_length() covers all index cycles;
    # confirm for dual-index runs.
    mask = ""
    index_length = self.get_sequencer_index_length()

    for read in self.read_infos:
        if read.is_index:
            mask += str(index_length) + 'B'
            break
        else:
            mask += str(read.nb_cycles) + 'T'

    if index_length == 0:
        log.info("No Indexes, *NOT* Generating index counts")
    else:
        input = self.run_dir + os.sep + "RunInfo.xml"
        output = self.output_dir + os.sep + os.path.basename(self.run_dir) + "_" + str(
            self.lane_number) + '.metrics'

        job = Job([input], [output], [["index", "module_java"]],
                  name="index." + self.run_id + "." + str(self.lane_number))
        # "mistmaches" below is a (typo'd) format placeholder name; it must
        # match the keyword passed to .format() and is not user-visible.
        job.command = """\
java -Djava.io.tmpdir={tmp_dir}\\
 {java_other_options}\\
 -Xmx{ram}\\
 -jar {jar}\\
 MAX_MISMATCHES={mistmaches}\\
 NUM_PROCESSORS={threads}\\
 BARCODE_FILE={barcode_file}\\
 BASECALLS_DIR={basecalls_dir}\\
 LANE={lane_number}\\
 READ_STRUCTURE={read_structure}\\
 METRICS_FILE={output}\\
 TMP_DIR={tmp_dir}""".format(
            tmp_dir=config.param('index', 'tmp_dir'),
            java_other_options=config.param('index', 'java_other_options'),
            ram=config.param('index', 'ram'),
            jar=config.param('index', 'jar'),
            mistmaches=self.number_of_mismatches,
            threads=config.param('index', 'threads'),
            barcode_file=config.param('index', 'barcode_file'),
            basecalls_dir=os.path.join(self.run_dir, "Data", "Intensities", "BaseCalls"),
            lane_number=self.lane_number,
            read_structure=mask,
            output=output
        )
        jobs.append(job)

    self.add_copy_job_inputs(jobs)
    return jobs
def fastq(self):
    """
    Launch fastq generation from Illumina raw data using BCL2FASTQ conversion
    software.

    The index base mask is calculated according to the sample and run configuration;
    and also according the mask parameters received (first/last index bases). The
    Casava sample sheet is generated with this mask. The default number of
    mismatches allowed in the index sequence is 1 and can be overrided with an
    command line argument. No demultiplexing occurs when there is only one sample in
    the lane.

    An optional notification command can be launched to notify the start of the
    fastq generation with the calculated mask.
    """
    jobs = []

    input = self.casava_sheet_file

    # Expected fastq outputs: one R1 per readset, plus R2 for paired-end runs.
    fastq_outputs = [readset.fastq1 for readset in self.readsets]
    if self.is_paired_end:
        fastq_outputs += [readset.fastq2 for readset in self.readsets]

    output_dir = self.output_dir + os.sep + "Unaligned." + str(self.lane_number)
    casava_sheet_prefix = config.param('fastq', 'casava_sample_sheet_prefix')
    other_options = config.param('fastq', 'other_options')
    mask = self.mask
    demultiplexing = False

    command = """\
bcl2fastq\\
 --runfolder-dir {run_dir}\\
 --output-dir {output_dir}\\
 --tiles {tiles}\\
 --sample-sheet {sample_sheet}\\
 {other_options}\\
""".format(
        run_dir=self.run_dir,
        output_dir=output_dir,
        tiles="s_" + str(self.lane_number),
        sample_sheet=self.output_dir + os.sep + casava_sheet_prefix + str(self.lane_number) + ".csv",
        other_options=other_options
    )

    # An "I" in the mask means index cycles are read: demultiplex the lane.
    if re.search("I", mask):
        self.validate_barcodes()
        demultiplexing = True
        command += " --barcode-mismatches {number_of_mismatches} --use-bases-mask {mask}".format(
            number_of_mismatches=self.number_of_mismatches,
            mask=mask
        )

    job = Job([input],
              fastq_outputs,
              [('fastq', 'module_bcl_to_fastq'), ('fastq', 'module_gcc')],
              command=command,
              name="fastq." + self.run_id + "." + str(self.lane_number)
              )
    jobs.append(job)

    # don't depend on notification commands
    self.add_copy_job_inputs(jobs)

    # Optional "fastq started" notification, configured via the ini file.
    notification_command_start = config.param('fastq_notification_start', 'notification_command', required=False)
    if notification_command_start:
        notification_command_start = notification_command_start.format(
            output_dir=self.output_dir,
            number_of_mismatches=self.number_of_mismatches if demultiplexing else "-",
            lane_number=self.lane_number,
            mask=mask if demultiplexing else "-",
            technology=config.param('fastq', 'technology'),
            run_id=self.run_id
        )
        # Use the same inputs and output of fastq job to send a notification each time the fastq job run
        job = Job([input], ["notificationFastqStart." + str(self.lane_number) + ".out"],
                  name="fastq_notification_start." + self.run_id + "." + str(self.lane_number),
                  command=notification_command_start)
        jobs.append(job)

    # Optional "fastq finished" notification, fired once the outputs exist.
    notification_command_end = config.param('fastq_notification_end', 'notification_command', required=False)
    if notification_command_end:
        notification_command_end = notification_command_end.format(
            output_dir=self.output_dir,
            lane_number=self.lane_number,
            technology=config.param('fastq', 'technology'),
            run_id=self.run_id
        )
        job = Job(fastq_outputs, ["notificationFastqEnd." + str(self.lane_number) + ".out"],
                  name="fastq_notification_end." + self.run_id + "." + str(self.lane_number),
                  command=notification_command_end)
        jobs.append(job)

    return jobs
def align(self):
    """
    Align the reads from the fastq file, sort the resulting .bam and create an
    index of that .bam.

    A basic alignment is performed on a sample when the `SampleRef` field of the
    Illumina sample sheet matches one of the regexps in the configuration file
    and the corresponding genome (and indexes) are installed.

    `STAR` is used as a splice-junctions aware aligner when the sample
    `library_source` is `cDNA` or contains `RNA`; otherwise `BWA_mem` is used to
    align the reads.
    """
    # Only readsets with a target bam (i.e. a known reference) are aligned.
    jobs = [readset.aligner.get_alignment_job(readset)
            for readset in self.readsets if readset.bam]
    self.add_copy_job_inputs(jobs)
    return self.throttle_jobs(jobs)
def picard_mark_duplicates(self):
    """
    Runs Picard mark duplicates on the sorted bam file.
    """
    jobs = []
    for readset in self.readsets:
        if not readset.bam:
            # No alignment was produced for this readset.
            continue
        prefix = readset.bam + "."
        # <bam>.bam -> <bam>.dup.bam, with metrics in <bam>.dup.metrics.
        job = picard.mark_duplicates([prefix + "bam"],
                                     prefix + "dup.bam",
                                     readset.bam + ".dup.metrics")
        job.name = "picard_mark_duplicates." + readset.name + ".dup." + self.run_id + "." + str(self.lane_number)
        jobs.append(job)
    self.add_copy_job_inputs(jobs)
    return self.throttle_jobs(jobs)
def metrics(self):
    """
    This step runs a series of multiple metrics collection jobs and the output bam
    from mark duplicates.

    - Picard CollectMultipleMetrics: A collection of picard metrics that runs at the
    same time to save on I/O.
        - CollectAlignmentSummaryMetrics,
        - CollectInsertSizeMetrics,
        - QualityScoreDistribution,
        - MeanQualityByCycle,
        - CollectBaseDistributionByCycle
    - BVATools DepthOfCoverage: Using the specified `BED Files` in the sample sheet,
    calculate the coverage of each target region.
    - Picard CalculateHsMetrics: Calculates a set of Hybrid Selection specific
    metrics from the BAM file. The bait and interval list is automatically created
    from the specicied `BED Files`.
    """
    jobs = []
    # Metrics only make sense for aligned readsets; the aligner decides
    # which metric jobs apply.
    for readset in self.readsets:
        if readset.bam:
            jobs.extend(readset.aligner.get_metrics_jobs(readset))
    self.add_copy_job_inputs(jobs)
    return self.throttle_jobs(jobs)
def blast(self):
    """
    Run blast on a subsample of the reads of each sample to find the 20 most
    frequent hits.

    The `runBlast.sh` tool from MUGQIC Tools is used. The number of reads to
    subsample can be configured by sample or for the whole lane. The output will be
    in the `Blast_sample` folder, under the Unaligned folder.
    """
    jobs = []

    nb_blast_to_do = config.param('blast', 'nb_blast_to_do', type="posint")
    is_nb_blast_per_lane = config.param('blast', 'is_nb_for_whole_lane', type="boolean")

    # When configured per lane, split the budget evenly across readsets
    # (at least 1 read each).
    if is_nb_blast_per_lane:
        nb_blast_to_do = int(nb_blast_to_do) // len(self.readsets)

    nb_blast_to_do = max(1, nb_blast_to_do)

    for readset in self.readsets:
        output_prefix = os.path.join(self.output_dir,
                                     "Unaligned." + readset.lane,
                                     "Blast_sample",
                                     readset.name + "_" + readset.sample_number + "_L00" + readset.lane)
        output = output_prefix + '.R1.RDP.blastHit_20MF_species.txt'
        current_jobs = [Job(command="mkdir -p " + os.path.dirname(output))]

        fasta_file = output_prefix + ".R1.subSampled_{nb_blast_to_do}.fasta".format(
            nb_blast_to_do=nb_blast_to_do)
        result_file = output_prefix + ".R1.subSampled_{nb_blast_to_do}.blastres".format(
            nb_blast_to_do=nb_blast_to_do)

        if readset.bam:
            # Aligned readset: subsample directly from the bam.
            input = readset.bam + ".bam"

            # count the read that aren't marked as secondary alignment and calculate the ratio of reads to subsample
            command = """subsampling=$(samtools view -F 0x0180 {input} | wc -l | awk -v nbReads={nb_blast_to_do} '{{x=sprintf("%.4f", nbReads/$1); if (x == "0.0000") print "0.0001"; else print x}}')""".format(
                input=input,
                nb_blast_to_do=nb_blast_to_do
            )
            current_jobs.append(Job([input], [], [["blast", "module_samtools"]], command=command))

            # subsample the reads and output to a temp fasta
            command = """samtools view -s $subsampling -F 0x0180 {input} | awk '{{OFS="\\t"; print ">"$1"\\n"$10}}' - > {fasta_file}""".format(
                input=input,
                fasta_file=fasta_file
            )
            current_jobs.append(Job([input], [], [["blast", "module_samtools"]], command=command))

            # run blast
            command = """blastn -query {fasta_file} -db nt -out {result_file} -perc_identity 80 -num_descriptions 1 -num_alignments 1""".format(
                fasta_file=fasta_file,
                result_file=result_file
            )
            current_jobs.append(Job([], [], [["blast", "module_blast"]], command=command))

            # filter and format the result to only have the sorted number of match and the species
            command = """grep ">" {result_file} | awk ' {{ print $2 "_" $3}} ' | sort | uniq -c | sort -n -r | head -20 > {output} && true""".format(
                result_file=result_file,
                output=output
            )
            current_jobs.append(Job([], [output], [], command=command))
        else:
            # Unaligned readset: delegate subsampling + blast to runBlast.sh.
            # NOTE(review): readset.fastq2 may be None here and still ends up
            # in the inputs list — confirm Job tolerates None inputs.
            inputs = [readset.fastq1, readset.fastq2]
            command = "runBlast.sh " + str(nb_blast_to_do) + " " + output_prefix + " " + readset.fastq1 + " "
            if readset.fastq2:
                command += readset.fastq2
            current_jobs.append(Job(inputs, [output], [["blast", "module_mugqic_tools"], ["blast", "module_blast"]],
                                    command=command))

        # rRNA estimate using silva blast db, using the same subset of reads as the "normal" blast
        rrna_db = config.param('blast', 'rrna_db', required=False)
        if readset.is_rna and rrna_db:
            rrna_result_file = result_file + "Rrna"
            rrna_output = output_prefix + ".R1.subSampled_{nb_blast_to_do}.rrna".format(
                nb_blast_to_do=nb_blast_to_do)
            command = """blastn -query {fasta_file} -db {db} -out {result_file} -perc_identity 80 -num_descriptions 1 -num_alignments 1""".format(
                fasta_file=fasta_file,
                result_file=rrna_result_file,
                db=rrna_db
            )
            current_jobs.append(Job([], [], [["blast", "module_blast"]], command=command))

            # The .rrna file holds: db name, nb of rRNA hits, nb of reads tried.
            command = """echo '{db}' > {output}""".format(
                db=rrna_db,
                output=rrna_output
            )
            current_jobs.append(Job([], [output], [], command=command))

            command = """grep ">" {result_file} | wc -l >> {output}""".format(
                result_file=rrna_result_file,
                output=rrna_output
            )
            current_jobs.append(Job([], [output], [], command=command))

            command = """grep ">" {fasta_file} | wc -l >> {output}""".format(
                fasta_file=fasta_file,
                output=rrna_output
            )
            current_jobs.append(Job([], [output], [], command=command))

        # merge all blast steps of the readset into one job
        job = concat_jobs(current_jobs,
                          name="blast." + readset.name + ".blast." + self.run_id + "." + str(self.lane_number))
        jobs.append(job)

    self.add_copy_job_inputs(jobs)
    return self.throttle_jobs(jobs)
def qc_graphs(self):
    """
    Generate some QC Graphics and a summary XML file for each sample using
    [BVATools](https://bitbucket.org/mugqic/bvatools/).

    Files are created in a 'qc' subfolder of the fastq directory. Examples of
    output graphic:

    - Per cycle qualities, sequence content and sequence length;
    - Known sequences (adaptors);
    - Abundant Duplicates;
    """
    jobs = []

    for readset in self.readsets:
        output_dir = os.path.dirname(readset.fastq1) + os.sep + "qc"
        region_name = readset.name + "_" + readset.sample_number + "_L00" + readset.lane

        # QC runs on the bam when an alignment exists, otherwise on the
        # fastq pair.
        if readset.bam:
            input1 = readset.bam + ".bam"
            input2 = None
            input_type = "BAM"
        else:
            input1 = readset.fastq1
            input2 = readset.fastq2
            input_type = "FASTQ"

        job = concat_jobs([
            Job(command="mkdir -p " + output_dir),
            bvatools.readsqc(
                input1,
                input2,
                input_type,
                region_name,
                output_dir
            )]
        )
        job.name = "qc." + readset.name + ".qc." + self.run_id + "." + str(self.lane_number)
        jobs.append(job)

    self.add_copy_job_inputs(jobs)
    return self.throttle_jobs(jobs)
def md5(self):
    """
    Create md5 checksum files for the fastq, bam and bai using the system 'md5sum'
    util.

    One checksum file is created for each file.
    """
    jobs = []
    for readset in self.readsets:
        # First read (always present).
        current_jobs = [Job([readset.fastq1], [readset.fastq1 + ".md5"],
                            command="md5sum -b " + readset.fastq1 + " > " + readset.fastq1 + ".md5")]

        # Second read in paired-end run
        if readset.fastq2:
            current_jobs.append(Job([readset.fastq2], [readset.fastq2 + ".md5"],
                                    command="md5sum -b " + readset.fastq2 + " > " + readset.fastq2 + ".md5"))

        # Alignment files
        if readset.bam:
            current_jobs.append(
                Job([readset.bam + ".bam"], [readset.bam + ".bam.md5"],
                    command="md5sum -b " + readset.bam + ".bam" + " > " + readset.bam + ".bam.md5"))
            current_jobs.append(Job([], [readset.bam + ".bai.md5"], command="md5sum -b " + readset.bam + ".bai" +
                                                                           " > " + readset.bam + ".bai.md5"))

        job = concat_jobs(current_jobs,
                          name="md5." + readset.name + ".md5." + self.run_id + "." + str(self.lane_number))

        jobs.append(job)

    # Optionally collapse all per-readset md5 jobs into a single job.
    # NOTE(review): the job name is passed positionally here but by name=
    # everywhere else — confirm concat_jobs' second positional parameter is
    # indeed the name.
    if config.param('md5', 'one_job', required=False, type="boolean"):
        job = concat_jobs(jobs, "md5." + self.run_id + "." + str(self.lane_number))
        self.add_copy_job_inputs([job])
        return [job]
    else:
        self.add_copy_job_inputs(jobs)
        return jobs
    def copy(self):
        """
        Copy processed files to another place where they can be served or loaded into a
        LIMS.
        The destination folder and the command used can be set in the configuration
        file.
        An optional notification can be sent before the copy. The command used is in the configuration file.
        """
        # Depend on every output tracked so far, so the copy runs after all steps.
        inputs = self.copy_job_inputs
        jobs_to_concat = []
        # Notification
        # Marker files recording that processing completed and that the copy started.
        output1 = self.output_dir + os.sep + "notificationProcessingComplete." + str(self.lane_number) + ".out"
        output2 = self.output_dir + os.sep + "notificationCopyStart." + str(self.lane_number) + ".out"
        notification_command = config.param('copy', 'notification_command', required=False)
        if notification_command:
            job = Job(inputs, [output1, output2],
                      name="start_copy_notification." + self.run_id + "." + str(self.lane_number))
            job.command = notification_command.format(
                technology=config.param('copy', 'technology'),
                output_dir=self.output_dir,
                run_id=self.run_id,
                output1=output1,
                output2=output2,
                lane_number=self.lane_number
            )
            jobs_to_concat.append(job)
        # Actual copy
        full_destination_folder = config.param('copy', 'destination_folder', type="dirpath") + os.path.basename(
            self.run_dir)
        # Marker file (in the destination) signalling the copy is done.
        output = full_destination_folder + os.sep + "copyCompleted." + str(self.lane_number) + ".out"
        exclude_bam = config.param('copy', 'exclude_bam', required=False, type='boolean')
        exclude_fastq_with_bam = config.param('copy', 'exclude_fastq_with_bam', required=False, type='boolean')
        if exclude_bam and exclude_fastq_with_bam:
            log.warn("Excluding both BAM and fastq files")
        # Build the list of file patterns that should NOT be copied; they are
        # substituted into the copy command as "--exclude '...'" clauses below.
        excluded_files = []
        if exclude_bam or exclude_fastq_with_bam:
            for readset in [readset for readset in self.readsets if readset.bam]:
                if exclude_bam:
                    excluded_files.append(readset.bam + ".bam*")
                    excluded_files.append(readset.bam + ".bai*")
                if exclude_fastq_with_bam and not exclude_bam:
                    excluded_files.append(readset.fastq1)
                    if readset.fastq2:
                        excluded_files.append(readset.fastq2)
        # When the raw run folder differs from the output folder, copy it first
        # (no exclusions apply to the raw run data).
        if self.run_dir != self.output_dir:
            copy_command_run_folder = config.param('copy', 'copy_command', required=False).format(
                exclusion_clauses="",
                lane_number=self.lane_number,
                run_id=self.run_id,
                source=self.run_dir,
                run_name=os.path.basename(self.run_dir)
            )
            jobs_to_concat.append(Job(inputs, [output], command=copy_command_run_folder))
        # Copy the output folder, excluding the files selected above (patterns
        # are made relative to output_dir before substitution).
        copy_command_output_folder = config.param('copy', 'copy_command', required=False).format(
            exclusion_clauses="\\\n".join(
                [" --exclude '" + excludedfile.replace(self.output_dir + os.sep, "") + "'" for excludedfile in
                 excluded_files]),
            lane_number=self.lane_number,
            run_id=self.run_id,
            source=self.output_dir,
            run_name=os.path.basename(self.run_dir)
        )
        jobs_to_concat.append(Job(inputs, [output], command=copy_command_output_folder))
        # Touch the completion marker so downstream steps know the copy finished.
        jobs_to_concat.append(Job(command="touch " + output))
        job = concat_jobs(jobs_to_concat, "copy." + self.run_id + "." + str(self.lane_number))
        return [job]
def end_copy_notification(self):
"""
Send an optional notification to notify that the copy is finished.
The command used is in the configuration file. This step is skipped when no
command is provided.
"""
jobs = []
full_destination_folder = config.param('copy', 'destination_folder', type="dirpath") + os.path.basename(
self.run_dir)
input = full_destination_folder + os.sep + "copyCompleted." + str(self.lane_number) + ".out"
output = full_destination_folder + os.sep + "notificationAssociation." + str(self.lane_number) + ".out"
notification_command = config.param('end_copy_notification', 'notification_command', required=False)
if notification_command:
job = Job([input], [output], name="end_copy_notification." + self.run_id + "." + str(self.lane_number))
job.command = notification_command.format(
technology=config.param('end_copy_notification', 'technology'),
output_dir=self.output_dir,
run_name=os.path.basename(self.run_dir),
run_id=self.run_id,
output=output,
lane_number=self.lane_number
)
jobs.append(job)
return jobs
#
# Utility methods
#
def add_copy_job_inputs(self, jobs):
for job in jobs:
# we first remove dependencies of the current job, since we will have a dependency on that job
self.copy_job_inputs = [item for item in self.copy_job_inputs if item not in job.input_files]
self.copy_job_inputs.extend(job.output_files)
def get_sequencer_index_length(self):
""" Returns the total number of index cycles of the run. """
return sum(index_read.nb_cycles for index_read in [read for read in self.read_infos if read.is_index])
def get_sequencer_minimum_read_length(self):
""" Returns the minimum number of cycles of a real read (not indexed). """
return min(read.nb_cycles for read in [read for read in self.read_infos if (not read.is_index)])
def validate_barcodes(self):
"""
Validate all index sequences against each other to ensure they aren't in collision according to the chosen
number of mismatches parameter.
"""
min_allowed_distance = (2 * self.number_of_mismatches) + 1
validated_indexes = []
collisions = []
for readset in self.readsets:
current_index = readset.index.replace('-', '')
for candidate_index in validated_indexes:
if distance(current_index, candidate_index) < min_allowed_distance:
collisions.append("'" + current_index + "' and '" + candidate_index + "'")
validated_indexes.append(current_index)
if len(collisions) > 0:
raise Exception("Barcode collisions: " + ";".join(collisions))
    def get_mask(self):
        """ Returns a BCL2FASTQ friendly mask of the reads cycles.
        The mask is calculated using:
        - first base and last base of index;
        - the index length in the sample sheet;
        - the number of index cycles on the sequencer;
        The mask is a comma-separated segment per read, using 'Y<n>' for
        template cycles, 'I<n>' for index cycles to keep and 'n<n>' for
        cycles to ignore.
        """
        mask = ""
        index_lengths = self.get_smallest_index_length()
        index_read_count = 0
        # Running count of index bases consumed across all index reads; compared
        # against self.first_index / self.last_index (1-based bounds).
        nb_total_index_base_used = 0
        for read_info in self.read_infos:
            if len(mask) > 0:
                mask += ','
            if read_info.is_index:
                if read_info.nb_cycles >= index_lengths[index_read_count]:
                    if index_lengths[index_read_count] == 0 or self.last_index <= nb_total_index_base_used:
                        # Don't use any index bases for this read
                        mask += 'n' + str(read_info.nb_cycles)
                    else:
                        nb_n_printed = 0
                        # Ns in the beginning of the index read
                        if self.first_index > (nb_total_index_base_used + 1):
                            nb_n_printed = min(read_info.nb_cycles, self.first_index - nb_total_index_base_used - 1)
                            if nb_n_printed >= index_lengths[index_read_count]:
                                # Skipping at least the whole usable index: ignore the full read.
                                nb_n_printed = read_info.nb_cycles
                            mask += 'n' + str(nb_n_printed)
                        # Calculate the number of index bases
                        nb_index_bases_used = max(index_lengths[index_read_count] - nb_n_printed, 0)
                        nb_index_bases_used = min(self.last_index - nb_total_index_base_used - nb_n_printed,
                                                  nb_index_bases_used)
                        nb_total_index_base_used += nb_index_bases_used + min(nb_n_printed,
                                                                              index_lengths[index_read_count])
                        if nb_index_bases_used > 0:
                            mask += 'I' + str(nb_index_bases_used)
                        # Ns at the end of the index read
                        remaining_base_count = read_info.nb_cycles - nb_index_bases_used - nb_n_printed
                        if remaining_base_count > 0:
                            mask += 'n' + str(remaining_base_count)
                index_read_count += 1
            else:
                # Normal read
                mask += 'Y' + str(read_info.nb_cycles)
        return mask
    def generate_illumina_lane_sample_sheet(self):
        """ Create a sample sheet to use with the BCL2FASTQ software.
        Only the samples of the chosen lane will be in the file.
        The sample indexes are trimmed according to the mask used.
        Side effects: writes the csv sample sheet to the output directory and
        sets readset._index, readset.fastq1 and readset.fastq2 on every readset.
        """
        read_masks = self.mask.split(",")
        has_single_index = self.has_single_index()
        csv_headers = ["FCID", "Lane", "Sample_ID", "Sample_Name", "SampleRef", "Index", "Index2", "Description", "Control",
                       "Recipe", "Operator", "Sample_Project"]
        csv_file = self.output_dir + os.sep + config.param('DEFAULT', 'casava_sample_sheet_prefix') + str(
            self.lane_number) + ".csv"
        # NOTE(review): opening in 'wb' for csv is the Python 2 idiom; under
        # Python 3 this would need text mode with newline='' instead.
        writer = csv.DictWriter(open(csv_file, 'wb'), delimiter=str(','), fieldnames=csv_headers)
        # add [Data] line before the actual headers
        section_header_dict = {"FCID": "[Data]"}
        writer.writerow(section_header_dict)
        writer.writeheader()
        for readset in self.readsets:
            index_to_use = ""
            # Indexes are only written when demultiplexing is needed
            # (the lane has an index and holds more than one sample).
            if len(readset.index) > 0 and len(self.readsets) > 1:
                indexes = readset.index.split("-")
                nb_index = len(indexes)
                if has_single_index:
                    # we have a mixed of index in the sample, there are samples with 1 or 2 index,
                    # ignore the second index in the samplesheet
                    nb_index = 1
                for i in range(0, nb_index):
                    nb_ignored_leading_bases = 0
                    nb_of_index_bases = 0
                    # Parse this index read's mask segment, e.g. "n2I6n1"
                    # (read_masks[0] is the first template read, hence i + 1).
                    m = re.match("(n\d+)?(I\d+)(n\d+)?", read_masks[i + 1])
                    if m:
                        if m.group(1):
                            nb_ignored_leading_bases = int(m.group(1)[1:])
                        if m.group(2):
                            nb_of_index_bases = int(m.group(2)[1:])
                    # remove ignored leading bases and trim index to smallest lane index
                    index = indexes[i][nb_ignored_leading_bases:nb_ignored_leading_bases + nb_of_index_bases]
                    if i > 0 and len(index) > 0:
                        index_to_use += "-"
                    index_to_use += index
            readset._index = index_to_use if len(index_to_use) > 0 else "NoIndex"
            index_array = index_to_use.split("-")
            # Record the fastq paths BCL2FASTQ will produce for this readset.
            fastq_file_pattern = os.path.join(self.output_dir,
                                              "Unaligned." + readset.lane,
                                              'Project_' + readset.project,
                                              'Sample_' + readset.name,
                                              readset.name + '_S' + readset.sample_number + '_L00' + readset.lane +
                                              '_R{read_number}_001.fastq.gz')
            readset.fastq1 = fastq_file_pattern.format(read_number=1)
            readset.fastq2 = fastq_file_pattern.format(read_number=2) if readset.run_type == "PAIRED_END" else None
            csv_dict = {
                "FCID": readset.flow_cell,
                "Lane": self.lane_number,
                "Sample_ID": "Sample_" + readset.name,
                "Sample_Name": readset.name,
                "SampleRef": "",
                "Index": index_array[0],
                "Index2": index_array[1] if len(index_array) > 1 else "",
                "Description": readset.description,
                "Control": readset.control,
                "Recipe": readset.recipe,
                "Operator": readset.operator,
                "Sample_Project": "Project_" + readset.project
            }
            writer.writerow(csv_dict)
def has_single_index(self):
""" Returns True when there is at least one sample on the lane that doesn't use double-indexing or we only have
one read of indexes.
"""
return len([readset for readset in self.readsets if ("-" not in readset.index)]) > 0 or\
len([read for read in self.read_infos if read.is_index]) < 2
    def get_smallest_index_length(self):
        """
        Returns a list (for each index read of the run) of the minimum between the number of index cycle on the
        sequencer and all the index lengths.
        An entry is forced to 0 when at least one sample doesn't provide that
        index read, so that read is skipped in the mask.
        """
        run_index_lengths = [r.nb_cycles for r in self.read_infos if r.is_index]  # from RunInfo
        if len(run_index_lengths) == 0 and len(self.readsets) > 1:
            raise Exception("Multiple samples on a lane, but no indexes were read from the sequencer.")
        # loop on all index reads, to compare with samples index length
        for i in range(0, len(run_index_lengths)):
            min_sample_index_length = 0
            try:
                # Shortest non-empty i-th index among the samples that have one.
                min_sample_index_length = min(len(readset.index.split("-")[i])
                                              for readset in
                                              self.readsets
                                              if (len(readset.index.split("-")) > i and len(
                                                  readset.index.split("-")[i]) > 0)
                                              )
            except ValueError:
                pass  # we don't have a sample with this Ith index read, use the 0 already set
            # Samples missing this i-th index read entirely (or with an empty one).
            empty_index_list = [readset for readset in self.readsets if
                                (len(readset.index.split("-")) <= i or len(readset.index.split("-")[i]) == 0)]
            if len(empty_index_list):
                # we have samples without this Ith index read, so we skip it
                min_sample_index_length = 0
            run_index_lengths[i] = min(min_sample_index_length, run_index_lengths[i])
        return run_index_lengths
def parse_run_info_file(self):
""" Parse the RunInfo.xml file of the run and returns the list of RunInfoRead objects """
reads = Xml.parse(self.run_dir + os.sep + "RunInfo.xml").getroot().find('Run').find('Reads')
return [RunInfoRead(int(r.get("Number")), int(r.get("NumCycles")), r.get("IsIndexedRead") == "Y") for r in
reads.iter('Read')]
    def load_readsets(self):
        """
        Download the sample sheets if required or asked for; call the load of these files and return a list of
        readsets.
        Raises:
            Exception: when either sheet download command exits non-zero.
        """
        # Casava sheet download
        if not self.args.casava_sheet_file or self.args.force_download:
            if not os.path.exists(self.casava_sheet_file) or self.args.force_download:
                command = config.param('DEFAULT', 'fetch_casava_sheet_command').format(
                    output_directory=self.output_dir,
                    run_id=self.run_id,
                    filename=self.casava_sheet_file
                )
                log.info(command)
                # NOTE(review): shell=True runs an operator-configured command
                # string; configuration content is trusted here.
                return_code = subprocess.call(command, shell=True)
                if return_code != 0:
                    raise Exception("Unable to download the Casava Sheet.")
        # Nanuq readset file download
        if not self.args.readsets or self.args.force_download:
            if not os.path.exists(self.nanuq_readset_file) or self.args.force_download:
                command = config.param('DEFAULT', 'fetch_nanuq_sheet_command').format(
                    output_directory=self.output_dir,
                    run_id=self.run_id,
                    filename=self.nanuq_readset_file
                )
                return_code = subprocess.call(command, shell=True)
                if return_code != 0:
                    raise Exception("Unable to download the Nanuq readset file.")
        # Parse both sheets into readset objects for the requested lane.
        return parse_illumina_raw_readset_files(
            self.output_dir,
            "PAIRED_END" if self.is_paired_end else "SINGLE_END",
            self.nanuq_readset_file,
            self.casava_sheet_file,
            self.args.lane_number,
            config.param('DEFAULT', 'genomes_home', type="dirpath"),
            self.get_sequencer_minimum_read_length()
        )
    def submit_jobs(self):
        """Submit all generated jobs through the parent pipeline implementation."""
        super(IlluminaRunProcessing, self).submit_jobs()
def throttle_jobs(self, jobs):
""" Group jobs of the same task (same name prefix) if they exceed the configured threshold number. """
max_jobs_per_step = config.param('default', 'max_jobs_per_step', required=False, type="int")
jobs_by_name = collections.OrderedDict()
reply = []
# group jobs by task (name)
for job in jobs:
jobs_by_name.setdefault(job.name.split(".", 1)[0], []).append(job)
# loop on all task
for job_name in jobs_by_name:
current_jobs = jobs_by_name[job_name]
if max_jobs_per_step and 0 < max_jobs_per_step < len(current_jobs):
# we exceed the threshold, we group using 'number_task_by_job' jobs per group
number_task_by_job = int(math.ceil(len(current_jobs) / max_jobs_per_step))
merged_jobs = []
for x in range(max_jobs_per_step):
if x * number_task_by_job < len(current_jobs):
merged_jobs.append(concat_jobs(
current_jobs[x * number_task_by_job:min((x + 1) * number_task_by_job, len(current_jobs))],
job_name + "." + str(x + 1) + "." + self.run_id + "." + str(self.lane_number)))
reply.extend(merged_jobs)
else:
reply.extend(current_jobs)
return reply
def distance(str1, str2):
    """ Returns the hamming distance between two equal-length strings.
    http://code.activestate.com/recipes/499304-hamming-distance/#c2
    The previous `itertools.imap(unicode.__ne__, ...)` only worked for unicode
    operands on Python 2 (it breaks on byte strings and does not exist on
    Python 3); a plain generator comparison behaves identically for any
    sequence type on both interpreters.
    """
    return sum(ch1 != ch2 for ch1, ch2 in zip(str1, str2))
if __name__ == '__main__':
    # Script entry point: instantiating the pipeline parses the command line
    # and drives job creation/submission.
    pipeline = IlluminaRunProcessing()
| ccmbioinfo/mugqic_pipelines | pipelines/illumina_run_processing/illumina_run_processing.py | Python | lgpl-3.0 | 46,253 | [
"BLAST"
] | 22a76e5f34c0e4e9d9daac29b09f46b791cd9f84af2170da6dfc6cebda5314c5 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window')
from data_800ms import Fmat_original
def pca(X):
    """Principal Component Analysis via the covariance method.
    Arguments:
        X: 2-D data matrix (an np.matrix in this script's usage) with
           variables as rows and observations as columns, matching numpy's
           default cov(rowvar) convention.
    Returns:
        vec: eigenvectors of the covariance matrix (projection directions)
        val: corresponding eigenvalues (variance per component)
        mean_X: per-row mean used for centering
        M: the mean-centered data
        Mcov: covariance matrix of the centered data
    """
    # get dimensions (also validates that X is 2-D)
    num_data, dim = X.shape
    # Center the data. NOTE(review): this relies on np.matrix broadcasting --
    # X.mean(axis=1) is (num_data, 1) for a matrix so each row is centered;
    # for a plain ndarray the 1-D mean would broadcast along the wrong axis.
    mean_X = X.mean(axis=1)
    M = X - mean_X  # subtract the mean (along columns)
    Mcov = np.cov(M)
    # print() works identically on Python 2 and 3 for a single argument;
    # the old `print '...'` statement is a Python 3 syntax error.
    print('PCA - COV-Method used')
    val, vec = np.linalg.eig(Mcov)
    # return the projection matrix, the variance and the mean
    return vec, val, mean_X, M, Mcov
if __name__ == '__main__':
    # Feature matrix loaded from the data module (one trial per column).
    Fmat = Fmat_original
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    # Run PCA on the full feature matrix.
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    # NOTE(review): the slice keeps 9 components despite the comment's "First 20".
    W = eigvec_total[:,0:9]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        # Standard deviation of each feature, from the covariance diagonal.
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z
    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y
    #Using PYMVPA
    # One class label per trial: 5 trials per object/condition, in column order.
    PCA_data = np.array(Y.T)
    PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
    # 1-nearest-neighbour classifier evaluated with leave-one-out cross-validation.
    clf = kNN(k=1)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True',numbers_alpha=2)
    #show()
    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,130.3,0,1.2])
    grid('True')
    show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test10_cross_validate_objects_800ms.py | Python | mit | 4,257 | [
"Mayavi"
] | e8158ce12cf51ffd0f7e026c4081d57189b89751320c9cbce5eb1791c89e61b8 |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
Vth = 15 # mV
w = 2 # ms
tau = 10 # ms
def calc_error(isi):
"""
Calculate approximation error for a given target inter-spike interval.
"""
# input current required for LIF to spike at t = isi
current = Vth/(tau*(1-np.exp(-isi/tau)))
# membrane potential of LIF at time (isi-w)
lif = current*tau*(1-np.exp(-(isi-w)/tau))
# membrane potential of approximate model at time (isi-w)
approx = current*(isi-w)
return abs(lif-approx)/lif
def calc_bound_diff(isi):
    """
    Calculate the ratio of the lower bound over the upper bound as a
    function of the inter-spike interval. Total reset neuron only.
    """
    # upper bound: whole threshold crossed within the window (mV/ms)
    upper_bound = Vth/w
    # input current that makes the neuron fire with period `isi`
    drive = Vth/(1-np.exp(-isi/tau))
    # lower bound: remaining distance to threshold at (isi-w), per window
    lower_bound = (Vth-drive*(1-np.exp(-(isi-w)/tau)))/w
    return lower_bound/upper_bound
# Sweep target inter-spike intervals; start just above w = 2 ms to avoid the
# singular point at isi == w in calc_error/calc_bound_diff.
isirange = np.arange(2.001, 20, 0.001)
errors = [calc_error(isi) for isi in isirange]
# Figure 1: relative approximation error vs ISI, with markers at 70/100/400 Hz.
plt.figure(figsize=(4,3))
plt.plot(isirange, errors)
pointsx = [1000/70, 1000/100, 1000/400] # 70, 100, 400 Hz
pointsy = [calc_error(x) for x in pointsx]
for x, y in zip(pointsx, pointsy):
    plt.plot(x, y, 'k.', markersize=10)
    plt.annotate("{} Hz".format(int(1000/x)), xy=(x, y),
                 xytext=(x-2, y+0.1))
plt.axis(xmin=0)
locs, labels = plt.xticks()
# Add a tick at the window length (2 ms).
locs = np.append(locs, 2)
plt.xticks(locs)
plt.xlabel("$\Delta t_i$ (ms)")
plt.ylabel("d")
plt.subplots_adjust(bottom=0.17, top=0.95, left=0.2, right=0.95)
plt.savefig("figures/relerr.pdf")
# Figure 2: lower/upper bound ratio vs ISI, same highlighted frequencies.
bound_diff = [calc_bound_diff(isi) for isi in isirange]
plt.figure(figsize=(4,3))
plt.plot(isirange, bound_diff)
pointsx = [1000/70, 1000/100, 1000/400] # 70, 100, 400 Hz
pointsy = [calc_bound_diff(x) for x in pointsx]
for x, y in zip(pointsx, pointsy):
    plt.plot(x, y, 'k.', markersize=10)
    plt.annotate("{} Hz".format(int(1000/x)), xy=(x, y),
                 xytext=(x+0.5, y+0.01))
# Dashed vertical line at the window length w.
plt.plot([w, w], [0, max(bound_diff)], "k--")
plt.axis(xmin=0)
locs, labels = plt.xticks()
locs = np.append(locs, w)
plt.xticks(locs)
plt.xlabel("$\Delta t_i$ (ms)")
plt.ylabel("Bound ratio $(\\frac{L}{U})$")
plt.subplots_adjust(bottom=0.17, top=0.95, left=0.2, right=0.95)
plt.savefig("figures/bound_ratio.pdf")
| achilleas-k/brian-scripts | thesis_stuff/reldiff.py | Python | apache-2.0 | 2,203 | [
"NEURON"
] | 2133384385e55a405966ad99a0bc75c421bcd2b4a3659bcc680f992855d86c9d |
#!/usr/bin/env python
"""
A linting tool to check if templates are safe
"""
from __future__ import print_function
import argparse
import ast
import bisect
import os
import re
import sys
import textwrap
from enum import Enum
class StringLines(object):
    """
    StringLines provides utility methods to work with a string in terms of
    lines. As an example, it can convert an index into a line number or column
    number (i.e. index into the line).
    Line numbers and columns are 1-based, matching editor/linter conventions.
    """
    def __init__(self, string):
        """
        Init method.
        Arguments:
            string: The string to work with.
        """
        self._string = string
        # Sorted list of indexes at which each line begins (always starts at 0).
        self._line_start_indexes = self._process_line_breaks(string)
        # this is an exclusive index used in the case that the template doesn't
        # end with a new line
        self.eof_index = len(string)
    def _process_line_breaks(self, string):
        """
        Creates a list, where each entry represents the index into the string
        where the next line break was found.
        Arguments:
            string: The string in which to find line breaks.
        Returns:
            A list of indices into the string at which each line begins.
        """
        line_start_indexes = [0]
        index = string.find('\n')
        while index >= 0:
            # Each line starts right after the newline of the previous one.
            line_start_indexes.append(index + 1)
            index = string.find('\n', index + 1)
        return line_start_indexes
    def get_string(self):
        """
        Get the original string.
        """
        return self._string
    def index_to_line_number(self, index):
        """
        Given an index, determines the line of the index.
        Arguments:
            index: The index into the original string for which we want to know
                the line number
        Returns:
            The line number (1-based) of the provided index.
        """
        # The line number equals the count of line-start indexes at or before
        # `index`; binary search replaces the previous linear scan (the start
        # indexes are sorted by construction).
        return bisect.bisect_right(self._line_start_indexes, index)
    def index_to_column_number(self, index):
        """
        Gets the column (i.e. index into the line) for the given index into the
        original string.
        Arguments:
            index: The index into the original string.
        Returns:
            The column (1-based) for the given index into the original string.
        """
        return index - self.index_to_line_start_index(index) + 1
    def index_to_line_start_index(self, index):
        """
        Gets the index of the start of the line of the given index.
        Arguments:
            index: The index into the original string.
        Returns:
            The index of the start of the line of the given index.
        """
        return self.line_number_to_start_index(self.index_to_line_number(index))
    def index_to_line_end_index(self, index):
        """
        Gets the index of the end of the line of the given index.
        Arguments:
            index: The index into the original string.
        Returns:
            The (exclusive) index of the end of the line of the given index.
        """
        return self.line_number_to_end_index(self.index_to_line_number(index))
    def line_number_to_start_index(self, line_number):
        """
        Gets the starting index for the provided line number.
        Arguments:
            line_number: The line number of the line for which we want to find
                the start index.
        Returns:
            The starting index for the provided line number.
        """
        return self._line_start_indexes[line_number - 1]
    def line_number_to_end_index(self, line_number):
        """
        Gets the ending index for the provided line number.
        Arguments:
            line_number: The line number of the line for which we want to find
                the end index.
        Returns:
            The (exclusive) ending index for the provided line number.
        """
        if line_number < len(self._line_start_indexes):
            return self._line_start_indexes[line_number]
        # Last line: use the exclusive end-of-file index, which also covers
        # files that don't end with a newline.
        return self.eof_index
    def line_number_to_line(self, line_number):
        """
        Gets the line of text designated by the provided line number.
        Arguments:
            line_number: The line number of the line we want to find.
        Returns:
            The line of text (without its trailing newline) designated by the
            provided line number.
        """
        start_index = self._line_start_indexes[line_number - 1]
        if len(self._line_start_indexes) == line_number:
            # Last line: no trailing newline to strip.
            return self._string[start_index:]
        end_index = self._line_start_indexes[line_number]
        # Strip the newline separating this line from the next.
        return self._string[start_index:end_index - 1]
    def line_count(self):
        """
        Gets the number of lines in the string.
        """
        return len(self._line_start_indexes)
class Rules(Enum):
    """
    An Enum of each rule which the linter will check.
    The enum value (rule_id) is the stable identifier used in reports and in
    `safe-lint: disable=...` pragmas.
    """
    # IMPORTANT: Do not edit without also updating the docs:
    # - http://edx.readthedocs.io/projects/edx-developer-guide/en/latest/conventions/safe_templates.html#safe-template-linter
    # Mako template rules
    mako_missing_default = 'mako-missing-default'
    mako_multiple_page_tags = 'mako-multiple-page-tags'
    mako_unparseable_expression = 'mako-unparseable-expression'
    mako_unwanted_html_filter = 'mako-unwanted-html-filter'
    mako_invalid_html_filter = 'mako-invalid-html-filter'
    mako_invalid_js_filter = 'mako-invalid-js-filter'
    mako_js_missing_quotes = 'mako-js-missing-quotes'
    mako_js_html_string = 'mako-js-html-string'
    mako_html_entities = 'mako-html-entities'
    mako_unknown_context = 'mako-unknown-context'
    # Underscore template rule
    underscore_not_escaped = 'underscore-not-escaped'
    # JavaScript rules
    javascript_jquery_append = 'javascript-jquery-append'
    javascript_jquery_prepend = 'javascript-jquery-prepend'
    javascript_jquery_insertion = 'javascript-jquery-insertion'
    javascript_jquery_insert_into_target = 'javascript-jquery-insert-into-target'
    javascript_jquery_html = 'javascript-jquery-html'
    javascript_concat_html = 'javascript-concat-html'
    javascript_escape = 'javascript-escape'
    javascript_interpolate = 'javascript-interpolate'
    # Python rules
    python_concat_html = 'python-concat-html'
    python_custom_escape = 'python-custom-escape'
    python_deprecated_display_name = 'python-deprecated-display-name'
    python_requires_html_or_text = 'python-requires-html-or-text'
    python_close_before_format = 'python-close-before-format'
    python_wrap_html = 'python-wrap-html'
    python_interpolate_html = 'python-interpolate-html'
    python_parse_error = 'python-parse-error'
    def __init__(self, rule_id):
        """Expose the enum value as `rule_id` for convenient attribute access."""
        self.rule_id = rule_id
class Expression(object):
    """
    Represents an arbitrary expression.
    An expression can be any type of code snippet. It will sometimes have a
    starting and ending delimiter, but not always.
    Here are some example expressions::
        ${x | n, decode.utf8}
        <%= x %>
        function(x)
        "<p>" + message + "</p>"
    Other details of note:
    - Only a start_index is required for a valid expression.
    - If end_index is None, it means we couldn't parse the rest of the
      expression.
    - All other details of the expression are optional, and are only added if
      and when supplied and needed for additional checks. They are not necessary
      for the final results output.
    """
    def __init__(self, start_index, end_index=None, template=None, start_delim="", end_delim="", strings=None):
        """
        Init method.
        Arguments:
            start_index: the starting index of the expression
            end_index: the index immediately following the expression, or None
                if the expression was unparseable
            template: optional template code in which the expression was found
            start_delim: optional starting delimiter of the expression
            end_delim: optional ending delimeter of the expression
            strings: optional list of ParseStrings
        """
        self.start_index = start_index
        self.end_index = end_index
        self.start_delim = start_delim
        self.end_delim = end_delim
        self.strings = strings
        if template is not None and end_index is not None:
            self.expression = template[start_index:end_index]
            # Strip the delimiters to expose the inner expression. The end of
            # the slice is computed explicitly because the previous
            # `[len(start_delim):-len(end_delim)]` produced an EMPTY slice
            # whenever end_delim was "" (since -0 == 0), losing the whole
            # inner expression for delimiter-less expressions.
            inner_end = len(self.expression) - len(end_delim)
            self.expression_inner = self.expression[len(start_delim):inner_end].strip()
        else:
            self.expression = None
            self.expression_inner = None
class RuleViolation(object):
    """
    Base class representing a rule violation which can be used for reporting.
    """
    def __init__(self, rule):
        """
        Init method.
        Arguments:
            rule: The Rule which was violated.
        """
        self.rule = rule
        self.full_path = ''
        self.is_disabled = False
    def _mark_disabled(self, string, scope_start_string=False):
        """
        Searches `string` for a disable pragma and marks this violation as
        disabled when the pragma names this violation's rule.
        Pragma format::
            safe-lint: disable=violation-name,other-violation-name
        Arguments:
            string: The string of code in which to search for the pragma.
            scope_start_string: True if the pragma must be at the start of the
                string, False otherwise. The pragma is considered at the start
                of the string if it has a maximum of 5 non-whitespace characters
                preceding it.
        Side Effect:
            Sets self.is_disabled as appropriate based on whether the pragma is
            found.
        """
        match = re.search(r'safe-lint:\s*disable=([a-zA-Z,-]+)', string)
        if match is None:
            return
        if scope_start_string:
            # Only accept the pragma "at the start": at most 5 non-space
            # characters (e.g. comment markers) may precede it.
            leading = string[:match.start()]
            if len(leading) - leading.count(' ') > 5:
                return
        if self.rule.rule_id in match.group(1).split(','):
            self.is_disabled = True
    def sort_key(self):
        """
        Returns a key that can be sorted on
        """
        return (0, 0, self.rule.rule_id)
    def first_line(self):
        """
        Since a file level rule has no first line, returns empty string.
        """
        return ''
    def prepare_results(self, full_path, string_lines):
        """
        Preps this instance for results reporting.
        Arguments:
            full_path: Path of the file in violation.
            string_lines: A StringLines containing the contents of the file in
                violation.
        """
        self.full_path = full_path
        # A file-level violation can be disabled by a pragma anywhere in the file.
        self._mark_disabled(string_lines.get_string())
    def print_results(self, _options, out):
        """
        Prints the results represented by this rule violation.
        Arguments:
            _options: ignored
            out: output file
        """
        print("{}: {}".format(self.full_path, self.rule.rule_id), file=out)
class ExpressionRuleViolation(RuleViolation):
    """
    A class representing a particular rule violation for expressions which
    contain more specific details of the location of the violation for reporting
    purposes.
    """
    def __init__(self, rule, expression):
        """
        Init method.
        Arguments:
            rule: The Rule which was violated.
            expression: The Expression that was in violation.
        """
        super(ExpressionRuleViolation, self).__init__(rule)
        self.expression = expression
        # Location details below are filled in later by prepare_results().
        self.start_line = 0
        self.start_column = 0
        self.end_line = 0
        self.end_column = 0
        self.lines = []
        self.is_disabled = False
    def _mark_expression_disabled(self, string_lines):
        """
        Marks the expression violation as disabled if it finds the disable
        pragma anywhere on the first line of the violation, or at the start of
        the line preceding the violation.
        Pragma format::
            safe-lint: disable=violation-name,other-violation-name
        Examples::
            <% // safe-lint: disable=underscore-not-escaped %>
            <%= gettext('Single Line') %>
            <%= gettext('Single Line') %><% // safe-lint: disable=underscore-not-escaped %>
        Arguments:
            string_lines: A StringLines containing the contents of the file in
                violation.
        Side Effect:
            Sets self.is_disabled as appropriate based on whether the pragma is
            found.
        """
        # disable pragma can be at the start of the preceding line
        has_previous_line = self.start_line > 1
        if has_previous_line:
            line_to_check = string_lines.line_number_to_line(self.start_line - 1)
            self._mark_disabled(line_to_check, scope_start_string=True)
            if self.is_disabled:
                return
        # TODO: this should work at end of any line of the violation
        # disable pragma can be anywhere on the first line of the violation
        line_to_check = string_lines.line_number_to_line(self.start_line)
        self._mark_disabled(line_to_check, scope_start_string=False)
    def sort_key(self):
        """
        Returns a key that can be sorted on
        """
        return (self.start_line, self.start_column, self.rule.rule_id)
    def first_line(self):
        """
        Returns the initial line of code of the violation.
        """
        return self.lines[0]
    def prepare_results(self, full_path, string_lines):
        """
        Preps this instance for results reporting.
        Arguments:
            full_path: Path of the file in violation.
            string_lines: A StringLines containing the contents of the file in
                violation.
        """
        self.full_path = full_path
        start_index = self.expression.start_index
        self.start_line = string_lines.index_to_line_number(start_index)
        self.start_column = string_lines.index_to_column_number(start_index)
        end_index = self.expression.end_index
        if end_index is not None:
            self.end_line = string_lines.index_to_line_number(end_index)
            self.end_column = string_lines.index_to_column_number(end_index)
        else:
            # Unparseable expression: report it as ending on its starting line,
            # with an unknown end column.
            self.end_line = self.start_line
            self.end_column = '?'
        # Cache every source line the violation spans, for reporting.
        for line_number in range(self.start_line, self.end_line + 1):
            self.lines.append(string_lines.line_number_to_line(line_number))
        self._mark_expression_disabled(string_lines)
    def print_results(self, options, out):
        """
        Prints the results represented by this rule violation.
        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
                verbose: True for multiple lines of context, False single line.
            out: output file
        """
        if options['verbose']:
            end_line = self.end_line + 1
        else:
            end_line = self.start_line + 1
        for line_number in range(self.start_line, end_line):
            if line_number == self.start_line:
                column = self.start_column
                rule_id = self.rule.rule_id + ":"
            else:
                # Continuation lines: pad so the source text stays aligned
                # under the first line's rule id.
                column = 1
                rule_id = " " * (len(self.rule.rule_id) + 1)
            # NOTE(review): encode() returns bytes; under Python 3 this prints
            # the bytes repr (b'...'), which matches the Python 2 behavior only
            # on Python 2.
            line = self.lines[line_number - self.start_line].encode(encoding='utf-8')
            print("{}: {}:{}: {} {}".format(
                self.full_path,
                line_number,
                column,
                rule_id,
                line
            ), file=out)
class SummaryResults(object):
    """
    Aggregates violation counts across every file that was linted.
    """

    def __init__(self):
        """
        Init method.
        """
        self.total_violations = 0
        # One counter per known rule, all starting at zero.
        self.totals_by_rule = {rule.rule_id: 0 for rule in Rules.__members__.values()}

    def add_violation(self, violation):
        """
        Adds a violation to the summary details.

        Arguments:
            violation: The violation to add to the summary.
        """
        rule_id = violation.rule.rule_id
        self.total_violations += 1
        self.totals_by_rule[rule_id] += 1

    def print_results(self, options, out):
        """
        Prints the results (i.e. violations) in this file.

        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
                rule_totals: If True include totals by rule.
            out: output file
        """
        if options['list_files'] is not False:
            # When only file names are listed, no summary is printed.
            return
        if options['rule_totals']:
            max_rule_id_len = max(len(rule_id) for rule_id in self.totals_by_rule)
            print("", file=out)
            for rule_id in sorted(self.totals_by_rule):
                padding = " " * (max_rule_id_len - len(rule_id))
                print("{}: {}{} violations".format(rule_id, padding, self.totals_by_rule[rule_id]), file=out)
            print("", file=out)
        # matches output of jshint for simplicity
        print("", file=out)
        print("{} violations total".format(self.total_violations), file=out)
class FileResults(object):
    """
    Collects the results (i.e. violations) found while linting one file.
    """

    def __init__(self, full_path):
        """
        Init method.

        Arguments:
            full_path: The full path for this file.
        """
        self.full_path = full_path
        self.directory = os.path.dirname(full_path)
        self.is_file = os.path.isfile(full_path)
        self.violations = []

    def prepare_results(self, file_string, line_comment_delim=None):
        """
        Prepares the results for output for this file.

        Arguments:
            file_string: The string of content for this file.
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.
        """
        source_lines = StringLines(file_string)
        for violation in self.violations:
            violation.prepare_results(self.full_path, source_lines)
        if line_comment_delim is not None:
            self._filter_commented_code(line_comment_delim)

    def print_results(self, options, summary_results, out):
        """
        Prints the results (i.e. violations) in this file.

        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
            summary_results: A SummaryResults with a summary of the violations.
            out: output file

        Side effect:
            Updates the passed SummaryResults.
        """
        if options['list_files']:
            # File-listing mode: name the file once if anything was found.
            if self.violations:
                print(self.full_path, file=out)
            return
        self.violations.sort(key=lambda violation: violation.sort_key())
        for violation in self.violations:
            if violation.is_disabled:
                continue
            violation.print_results(options, out)
            summary_results.add_violation(violation)

    def _filter_commented_code(self, line_comment_delim):
        """
        Remove any violations that were found in commented out code.

        Arguments:
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.
        """
        kept = []
        for violation in self.violations:
            if not self._is_commented(violation, line_comment_delim):
                kept.append(violation)
        self.violations = kept

    def _is_commented(self, violation, line_comment_delim):
        """
        Checks if violation line is commented out.

        Arguments:
            violation: The violation to check
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.

        Returns:
            True if the first line of the violation is actually commented out,
            False otherwise.
        """
        if 'parse' in violation.rule.rule_id:
            # Never filter parse rules: the comment itself may be part of the
            # parse problem being reported.
            return False
        return violation.first_line().lstrip().startswith(line_comment_delim)
class ParseString(object):
    """
    ParseString is the result of parsing a string out of a template.

    A ParseString has the following attributes:
        start_index: The index of the first quote, or None if none found
        end_index: The index following the closing quote, or None if
            unparseable
        quote_length: The length of the quote. Could be 3 for a Python
            triple quote. Or None if none found.
        string: the text of the parsed string, or None if none found.
        string_inner: the text inside the quotes of the parsed string, or None
            if none found.
    """

    def __init__(self, template, start_index, end_index):
        """
        Init method.

        Arguments:
            template: The template to be searched.
            start_index: The start index to search.
            end_index: The end index to search before.
        """
        # All attributes default to "not found" and are only filled in when
        # parsing succeeds.
        self.end_index = None
        self.quote_length = None
        self.string = None
        self.string_inner = None
        self.start_index = self._find_string_start(template, start_index, end_index)
        if self.start_index is None:
            return
        parsed = self._parse_string(template, self.start_index)
        if parsed is None:
            return
        self.end_index = parsed['end_index']
        self.quote_length = parsed['quote_length']
        self.string = parsed['string']
        self.string_inner = parsed['string_inner']

    def _find_string_start(self, template, start_index, end_index):
        """
        Finds the index of the end of start of a string. In other words, the
        first single or double quote.

        Arguments:
            template: The template to be searched.
            start_index: The start index to search.
            end_index: The end index to search before.

        Returns:
            The start index of the first single or double quote, or None if no
            quote was found.
        """
        first_quote = re.compile(r"""['"]""").search(template, start_index, end_index)
        return None if first_quote is None else first_quote.start()

    def _parse_string(self, template, start_index):
        """
        Finds the indices of a string inside a template.

        Arguments:
            template: The template to be searched.
            start_index: The start index of the open quote.

        Returns:
            A dict containing the following, or None if not parseable:
                end_index: The index following the closing quote
                quote_length: The length of the quote. Could be 3 for a Python
                    triple quote.
                string: the text of the parsed string
                string_inner: the text inside the quotes of the parsed string
        """
        quote = template[start_index]
        if quote not in ("'", '"'):
            raise ValueError("start_index must refer to a single or double quote.")
        # A tripled quote char (e.g. Python ''' or \"\"\") closes with the same
        # triple.
        if template.startswith(quote * 3, start_index):
            quote = quote * 3
        search_from = start_index + len(quote)
        while True:
            closing_index = template.find(quote, search_from)
            if closing_index < 0:
                # No closing quote: unparseable.
                return None
            escape_index = template.find("\\", search_from)
            if 0 <= escape_index < closing_index:
                # A backslash precedes the candidate closing quote; skip the
                # escaped character and keep searching.
                search_from = escape_index + 2
                continue
            end_index = closing_index + len(quote)
            text = template[start_index:end_index]
            return {
                'end_index': end_index,
                'quote_length': len(quote),
                'string': text,
                'string_inner': text[len(quote):-len(quote)],
            }
class BaseLinter(object):
    """
    BaseLinter provides some helper functions that are used by multiple linters.
    """

    # Subclasses override this with their language's line-comment prefix
    # (e.g. "//" for JavaScript); None means comments are not recognized.
    LINE_COMMENT_DELIM = None

    def _is_valid_directory(self, skip_dirs, directory):
        """
        Determines if the provided directory is a directory that could contain
        a file that needs to be linted.

        Arguments:
            skip_dirs: The directories to be skipped.
            directory: The directory to be linted.

        Returns:
            True if this directory should be linted for violations and False
            otherwise.
        """
        return not is_skip_dir(skip_dirs, directory)

    def _load_file(self, file_full_path):
        """
        Loads a file into a unicode string.

        Arguments:
            file_full_path: The full path of the file to be loaded.

        Returns:
            A unicode string containing the file's contents.
        """
        # Fix: read as bytes and decode explicitly. The previous code opened
        # in text mode and then called str.decode, which only exists on
        # Python 2; bytes.decode works identically on Python 2 and 3.
        with open(file_full_path, 'rb') as input_file:
            file_contents = input_file.read()
        return file_contents.decode(encoding='utf-8')

    def _load_and_check_file_is_safe(self, file_full_path, lint_function, results):
        """
        Loads the file and checks if it is in violation.

        Arguments:
            file_full_path: The file to be loaded and linted.
            lint_function: A function that will lint for violations. It must
                take two arguments:
                1) string contents of the file
                2) results object
            results: A FileResults to be used for this file

        Returns:
            The file results containing any violations.
        """
        file_contents = self._load_file(file_full_path)
        lint_function(file_contents, results)
        return results

    def _find_closing_char_index(
            self, start_delim, open_char, close_char, template, start_index, num_open_chars=0, strings=None
    ):
        """
        Finds the index of the closing char that matches the opening char.

        For example, this could be used to find the end of a Mako expression,
        where the open and close characters would be '{' and '}'.

        Arguments:
            start_delim: If provided (e.g. '${' for Mako expressions), the
                closing character must be found before the next start_delim.
            open_char: The opening character to be matched (e.g '{')
            close_char: The closing character to be matched (e.g '}')
            template: The template to be searched.
            start_index: The start index of the last open char.
            num_open_chars: The current number of open chars.
            strings: A list of ParseStrings already parsed

        Returns:
            A dict containing the following, or None if unparseable:
                close_char_index: The index of the closing character
                strings: a list of ParseStrings
        """
        strings = [] if strings is None else strings
        # Find start index of an uncommented line.
        start_index = self._uncommented_start_index(template, start_index)
        # loop until we found something useful on an uncommented out line
        while start_index is not None:
            close_char_index = template.find(close_char, start_index)
            if close_char_index < 0:
                # If we can't find a close char, let's just quit.
                return None
            open_char_index = template.find(open_char, start_index, close_char_index)
            parse_string = ParseString(template, start_index, close_char_index)
            # The earliest of (close char, open char, string start) decides
            # what to handle next.
            valid_index_list = [close_char_index]
            if 0 <= open_char_index:
                valid_index_list.append(open_char_index)
            if parse_string.start_index is not None:
                valid_index_list.append(parse_string.start_index)
            min_valid_index = min(valid_index_list)
            start_index = self._uncommented_start_index(template, min_valid_index)
            if start_index == min_valid_index:
                break
        if start_index is None:
            # No uncommented code to search.
            return None
        if parse_string.start_index == min_valid_index:
            # A string starts first: record it and resume after the string so
            # that chars inside the string are ignored.
            strings.append(parse_string)
            if parse_string.end_index is None:
                return None
            else:
                return self._find_closing_char_index(
                    start_delim, open_char, close_char, template, start_index=parse_string.end_index,
                    num_open_chars=num_open_chars, strings=strings
                )
        if open_char_index == min_valid_index:
            if start_delim is not None:
                # if we find another starting delim, consider this unparseable
                start_delim_index = template.find(start_delim, start_index, close_char_index)
                if 0 <= start_delim_index < open_char_index:
                    return None
            # A nested open char: recurse with one more open char outstanding.
            return self._find_closing_char_index(
                start_delim, open_char, close_char, template, start_index=open_char_index + 1,
                num_open_chars=num_open_chars + 1, strings=strings
            )
        if num_open_chars == 0:
            # This close char balances the original open char.
            return {
                'close_char_index': close_char_index,
                'strings': strings,
            }
        else:
            # This close char balances a nested open char; keep going.
            return self._find_closing_char_index(
                start_delim, open_char, close_char, template, start_index=close_char_index + 1,
                num_open_chars=num_open_chars - 1, strings=strings
            )

    def _uncommented_start_index(self, template, start_index):
        """
        Finds the first start_index that is on an uncommented line.

        Arguments:
            template: The template to be searched.
            start_index: The start index of the last open char.

        Returns:
            If start_index is on an uncommented out line, returns start_index.
            Otherwise, returns the start_index of the first line that is
            uncommented, if there is one. Otherwise, returns None.
        """
        if self.LINE_COMMENT_DELIM is not None:
            line_start_index = StringLines(template).index_to_line_start_index(start_index)
            # Fix: raw string so "\s" is a regex escape, not a (deprecated)
            # Python string escape.
            uncommented_line_start_index_regex = re.compile(
                r"^(?!\s*{})".format(self.LINE_COMMENT_DELIM), re.MULTILINE
            )
            # Finds the line start index of the first uncommented line, including the current line.
            match = uncommented_line_start_index_regex.search(template, line_start_index)
            if match is None:
                # No uncommented lines.
                return None
            elif match.start() < start_index:
                # Current line is uncommented, so return original start_index.
                return start_index
            else:
                # Return start of first uncommented line.
                return match.start()
        else:
            # No line comment delimiter, so this acts as a no-op.
            return start_index
class UnderscoreTemplateLinter(BaseLinter):
    """
    The linter for Underscore.js template files.
    """

    def __init__(self):
        """
        Init method.
        """
        super(UnderscoreTemplateLinter, self).__init__()
        self._skip_underscore_dirs = SKIP_DIRS + ('test',)

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is an Underscore template file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential underscore file

        Returns:
            The file results containing any violations.
        """
        full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(full_path)
        # Only lint .underscore files that live outside the skip list.
        if not self._is_valid_directory(self._skip_underscore_dirs, directory):
            return results
        if not file_name.lower().endswith('.underscore'):
            return results
        return self._load_and_check_file_is_safe(full_path, self.check_underscore_file_is_safe, results)

    def check_underscore_file_is_safe(self, underscore_template, results):
        """
        Checks for violations in an Underscore.js template.

        Arguments:
            underscore_template: The contents of the Underscore.js template.
            results: A file results objects to which violations will be added.
        """
        self._check_underscore_expressions(underscore_template, results)
        results.prepare_results(underscore_template)

    def _check_underscore_expressions(self, underscore_template, results):
        """
        Searches for Underscore.js expressions that contain violations.

        Arguments:
            underscore_template: The contents of the Underscore.js template.
            results: A list of results into which violations will be added.
        """
        for expression in self._find_unescaped_expressions(underscore_template):
            if self._is_safe_unescaped_expression(expression):
                continue
            results.violations.append(ExpressionRuleViolation(
                Rules.underscore_not_escaped, expression
            ))

    def _is_safe_unescaped_expression(self, expression):
        """
        Determines whether an expression is safely escaped, even though it is
        using the expression syntax that doesn't itself escape (i.e. <%= ).

        In some cases it is ok to not use the Underscore.js template escape
        (i.e. <%- ) because the escaping is happening inside the expression.

        Safe examples::

            <%= HtmlUtils.ensureHtml(message) %>
            <%= _.escape(message) %>

        Arguments:
            expression: The Expression being checked.

        Returns:
            True if the Expression has been safely escaped, and False otherwise.
        """
        return expression.expression_inner.startswith(('HtmlUtils.', '_.escape('))

    def _find_unescaped_expressions(self, underscore_template):
        """
        Returns a list of unsafe expressions.

        At this time all expressions that are unescaped are considered unsafe.

        Arguments:
            underscore_template: The contents of the Underscore.js template.

        Returns:
            A list of Expressions.
        """
        unescaped_pattern = re.compile("<%=.*?%>", re.DOTALL)
        return [
            Expression(
                match.start(), match.end(), template=underscore_template, start_delim="<%=", end_delim="%>"
            )
            for match in unescaped_pattern.finditer(underscore_template)
        ]
class JavaScriptLinter(BaseLinter):
    """
    The linter for JavaScript and CoffeeScript files.
    """

    LINE_COMMENT_DELIM = "//"

    def __init__(self):
        """
        Init method.
        """
        super(JavaScriptLinter, self).__init__()
        self._skip_javascript_dirs = SKIP_DIRS + ('i18n', 'static/coffee')
        self._skip_coffeescript_dirs = SKIP_DIRS
        self.underscore_linter = UnderscoreTemplateLinter()

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is a JavaScript file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential JavaScript file

        Returns:
            The file results containing any violations.
        """
        file_full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(file_full_path)
        if not results.is_file:
            return results
        if file_name.lower().endswith('.js') and not file_name.lower().endswith('.min.js'):
            skip_dirs = self._skip_javascript_dirs
        elif file_name.lower().endswith('.coffee'):
            skip_dirs = self._skip_coffeescript_dirs
        else:
            return results
        if not self._is_valid_directory(skip_dirs, directory):
            return results
        return self._load_and_check_file_is_safe(file_full_path, self.check_javascript_file_is_safe, results)

    def check_javascript_file_is_safe(self, file_contents, results):
        """
        Checks for violations in a JavaScript file.

        Arguments:
            file_contents: The contents of the JavaScript file.
            results: A file results objects to which violations will be added.
        """
        no_caller_check = None
        no_argument_check = None
        self._check_jquery_function(
            file_contents, "append", Rules.javascript_jquery_append, no_caller_check,
            self._is_jquery_argument_safe, results
        )
        self._check_jquery_function(
            file_contents, "prepend", Rules.javascript_jquery_prepend, no_caller_check,
            self._is_jquery_argument_safe, results
        )
        self._check_jquery_function(
            file_contents, "unwrap|wrap|wrapAll|wrapInner|after|before|replaceAll|replaceWith",
            Rules.javascript_jquery_insertion, no_caller_check, self._is_jquery_argument_safe, results
        )
        self._check_jquery_function(
            file_contents, "appendTo|prependTo|insertAfter|insertBefore",
            Rules.javascript_jquery_insert_into_target, self._is_jquery_insert_caller_safe, no_argument_check, results
        )
        self._check_jquery_function(
            file_contents, "html", Rules.javascript_jquery_html, no_caller_check,
            self._is_jquery_html_argument_safe, results
        )
        self._check_javascript_interpolate(file_contents, results)
        self._check_javascript_escape(file_contents, results)
        self._check_concat_with_html(file_contents, Rules.javascript_concat_html, results)
        self.underscore_linter.check_underscore_file_is_safe(file_contents, results)
        results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)

    def _get_expression_for_function(self, file_contents, function_start_match):
        """
        Returns an expression that matches the function call opened with
        function_start_match.

        Arguments:
            file_contents: The contents of the JavaScript file.
            function_start_match: A regex match representing the start of the function
                call (e.g. ".escape(").

        Returns:
            An Expression that best matches the function.
        """
        start_index = function_start_match.start()
        inner_start_index = function_start_match.end()
        result = self._find_closing_char_index(
            None, "(", ")", file_contents, start_index=inner_start_index
        )
        if result is not None:
            end_index = result['close_char_index'] + 1
            expression = Expression(
                start_index, end_index, template=file_contents, start_delim=function_start_match.group(), end_delim=")"
            )
        else:
            # Unparseable call: record only the start so it can still be
            # reported.
            expression = Expression(start_index)
        return expression

    def _check_javascript_interpolate(self, file_contents, results):
        """
        Checks that interpolate() calls are safe.

        Only use of StringUtils.interpolate() or HtmlUtils.interpolateText()
        are safe.

        Arguments:
            file_contents: The contents of the JavaScript file.
            results: A file results objects to which violations will be added.
        """
        # Ignores calls starting with "StringUtils.", because those are safe.
        # Fix: the member-access dot is escaped; an unescaped '.' matched any
        # character, flagging e.g. "xinterpolate(" as a violation.
        regex = re.compile(r"(?<!StringUtils)\.interpolate\(")
        for function_match in regex.finditer(file_contents):
            expression = self._get_expression_for_function(file_contents, function_match)
            results.violations.append(ExpressionRuleViolation(Rules.javascript_interpolate, expression))

    def _check_javascript_escape(self, file_contents, results):
        """
        Checks that only necessary escape() are used.

        Allows for _.escape(), although this shouldn't be the recommendation.

        Arguments:
            file_contents: The contents of the JavaScript file.
            results: A file results objects to which violations will be added.
        """
        # Ignores calls starting with "_.", because those are safe.
        # Fix: removed the duplicated "regex = regex =" assignment and escaped
        # the member-access dot (an unescaped '.' also matched "unescape(").
        regex = re.compile(r"(?<!_)\.escape\(")
        for function_match in regex.finditer(file_contents):
            expression = self._get_expression_for_function(file_contents, function_match)
            results.violations.append(ExpressionRuleViolation(Rules.javascript_escape, expression))

    def _check_jquery_function(self, file_contents, function_names, rule, is_caller_safe, is_argument_safe, results):
        """
        Checks that the JQuery function_names (e.g. append(), prepend()) calls
        are safe.

        Arguments:
            file_contents: The contents of the JavaScript file.
            function_names: A pipe delimited list of names of the functions
                (e.g. "wrap|after|before").
            rule: The name of the rule to use for validation errors (e.g.
                Rules.javascript_jquery_append).
            is_caller_safe: A function to test if caller of the JQuery function
                is safe.
            is_argument_safe: A function to test if the argument passed to the
                JQuery function is safe.
            results: A file results objects to which violations will be added.
        """
        # Ignores calls starting with "HtmlUtils.", because those are safe.
        # Fix: the member-access dot is escaped; an unescaped '.' matched any
        # character, flagging e.g. "myappend(" as a jQuery call.
        regex = re.compile(r"(?<!HtmlUtils)\.(?:{})\(".format(function_names))
        for function_match in regex.finditer(file_contents):
            is_violation = True
            expression = self._get_expression_for_function(file_contents, function_match)
            if expression.end_index is not None:
                start_index = expression.start_index
                inner_start_index = function_match.end()
                close_paren_index = expression.end_index - 1
                function_argument = file_contents[inner_start_index:close_paren_index].strip()
                if is_argument_safe is not None and is_caller_safe is None:
                    is_violation = is_argument_safe(function_argument) is False
                elif is_caller_safe is not None and is_argument_safe is None:
                    line_start_index = StringLines(file_contents).index_to_line_start_index(start_index)
                    caller_line_start = file_contents[line_start_index:start_index]
                    is_violation = is_caller_safe(caller_line_start) is False
                else:
                    raise ValueError("Must supply either is_argument_safe, or is_caller_safe, but not both.")
            if is_violation:
                results.violations.append(ExpressionRuleViolation(rule, expression))

    def _is_jquery_argument_safe_html_utils_call(self, argument):
        """
        Checks that the argument sent to a jQuery DOM insertion function is a
        safe call to HtmlUtils.

        A safe argument is of the form:
        - HtmlUtils.xxx(anything).toString()
        - edx.HtmlUtils.xxx(anything).toString()

        Arguments:
            argument: The argument sent to the jQuery function (e.g.
                append(argument)).

        Returns:
            True if the argument is safe, and False otherwise.
        """
        # match on HtmlUtils.xxx().toString() or edx.HtmlUtils
        match = re.search(r"(?:edx\.)?HtmlUtils\.[a-zA-Z0-9]+\(.*\)\.toString\(\)", argument)
        return match is not None and match.group() == argument

    def _is_jquery_argument_safe(self, argument):
        """
        Check the argument sent to a jQuery DOM insertion function (e.g.
        append()) to check if it is safe.

        Safe arguments include:
        - the argument can end with ".el", ".$el" (with no concatenation)
        - the argument can be a single variable ending in "El" or starting with
            "$". For example, "testEl" or "$test".
        - the argument can be a single string literal with no HTML tags
        - the argument can be a call to $() with the first argument a string
            literal with a single HTML tag. For example, ".append($('<br/>'))"
            or ".append($('<br/>'))".
        - the argument can be a call to HtmlUtils.xxx(html).toString()

        Arguments:
            argument: The argument sent to the jQuery function (e.g.
                append(argument)).

        Returns:
            True if the argument is safe, and False otherwise.
        """
        match_variable_name = re.search("[_$a-zA-Z]+[_$a-zA-Z0-9]*", argument)
        if match_variable_name is not None and match_variable_name.group() == argument:
            if argument.endswith('El') or argument.startswith('$'):
                return True
        elif argument.startswith('"') or argument.startswith("'"):
            # a single literal string with no HTML is ok
            # 1. it gets rid of false negatives for non-jquery calls (e.g. graph.append("g"))
            # 2. JQuery will treat this as a plain text string and will escape any & if needed.
            string = ParseString(argument, 0, len(argument))
            if string.string == argument and "<" not in argument:
                return True
        elif argument.startswith('$('):
            # match on JQuery calls with single string and single HTML tag
            # Examples:
            #    $("<span>")
            #    $("<div/>")
            #    $("<div/>", {...})
            match = re.search(r"""\$\(\s*['"]<[a-zA-Z0-9]+\s*[/]?>['"]\s*[,)]""", argument)
            if match is not None:
                return True
        elif self._is_jquery_argument_safe_html_utils_call(argument):
            return True
        # check rules that shouldn't use concatenation
        elif "+" not in argument:
            if argument.endswith('.el') or argument.endswith('.$el'):
                return True
        return False

    def _is_jquery_html_argument_safe(self, argument):
        """
        Check the argument sent to the jQuery html() function to check if it is
        safe.

        Safe arguments to html():
        - no argument (i.e. getter rather than setter)
        - empty string is safe
        - the argument can be a call to HtmlUtils.xxx(html).toString()

        Arguments:
            argument: The argument sent to html() in code (i.e. html(argument)).

        Returns:
            True if the argument is safe, and False otherwise.
        """
        if argument == "" or argument == "''" or argument == '""':
            return True
        elif self._is_jquery_argument_safe_html_utils_call(argument):
            return True
        return False

    def _is_jquery_insert_caller_safe(self, caller_line_start):
        """
        Check that the caller of a jQuery DOM insertion function that takes a
        target is safe (e.g. thisEl.appendTo(target)).

        If original line was::

            draggableObj.iconEl.appendTo(draggableObj.containerEl);

        Parameter caller_line_start would be:

            draggableObj.iconEl

        Safe callers include:
        - the caller can be ".el", ".$el"
        - the caller can be a single variable ending in "El" or starting with
            "$". For example, "testEl" or "$test".

        Arguments:
            caller_line_start: The line leading up to the jQuery function call.

        Returns:
            True if the caller is safe, and False otherwise.
        """
        # matches end of line for caller, which can't itself be a function
        # Fix: the quantifier belongs inside the capture group; the original
        # "([...]+[...])*$" repeated the whole group, rejecting short callers.
        caller_match = re.search(r"(?:\s*|[.])([_$a-zA-Z][_$a-zA-Z0-9]*)$", caller_line_start)
        if caller_match is None:
            return False
        caller = caller_match.group(1)
        if caller is None:
            return False
        elif caller.endswith('El') or caller.startswith('$'):
            return True
        elif caller == 'el' or caller == 'parentNode':
            return True
        return False

    def _check_concat_with_html(self, file_contents, rule, results):
        """
        Checks that strings with HTML are not concatenated

        Arguments:
            file_contents: The contents of the JavaScript file.
            rule: The rule that was violated if this fails.
            results: A file results objects to which violations will be added.
        """
        lines = StringLines(file_contents)
        last_expression = None
        # Match quoted strings that starts with '<' or ends with '>'.
        regex_string_with_html = r"""
            {quote}                             # Opening quote.
                (
                    \s*<                        # Starts with '<' (ignoring spaces)
                    ([^{quote}]|[\\]{quote})*   # followed by anything but a closing quote.
                    |                           # Or,
                    ([^{quote}]|[\\]{quote})*   # Anything but a closing quote
                    >\s*                        # ending with '>' (ignoring spaces)
                )
            {quote}                             # Closing quote.
        """
        # Match single or double quote.
        regex_string_with_html = "({}|{})".format(
            regex_string_with_html.format(quote="'"),
            regex_string_with_html.format(quote='"'),
        )
        # Match quoted HTML strings next to a '+'.
        regex_concat_with_html = re.compile(
            r"(\+\s*{string_with_html}|{string_with_html}\s*\+)".format(
                string_with_html=regex_string_with_html,
            ),
            re.VERBOSE
        )
        for match in regex_concat_with_html.finditer(file_contents):
            found_new_violation = False
            if last_expression is not None:
                last_line = lines.index_to_line_number(last_expression.start_index)
                # check if violation should be expanded to more of the same line
                if last_line == lines.index_to_line_number(match.start()):
                    last_expression = Expression(
                        last_expression.start_index, match.end(), template=file_contents
                    )
                else:
                    results.violations.append(ExpressionRuleViolation(
                        rule, last_expression
                    ))
                    found_new_violation = True
            else:
                found_new_violation = True
            if found_new_violation:
                last_expression = Expression(
                    match.start(), match.end(), template=file_contents
                )
        # add final expression
        if last_expression is not None:
            results.violations.append(ExpressionRuleViolation(
                rule, last_expression
            ))
class BaseVisitor(ast.NodeVisitor):
    """
    Base class for AST NodeVisitor used for Python safe linting.

    Important: This base visitor skips all __repr__ function definitions.
    """

    def __init__(self, file_contents, results):
        """
        Init method.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(BaseVisitor, self).__init__()
        self.file_contents = file_contents
        self.lines = StringLines(self.file_contents)
        self.results = results

    def node_to_expression(self, node):
        """
        Takes a node and translates it to an expression to be used with
        violations.

        Arguments:
            node: An AST node.
        """
        line_start_index = self.lines.line_number_to_start_index(node.lineno)
        start_index = line_start_index + node.col_offset
        if not isinstance(node, ast.Str):
            return Expression(start_index)
        # Triple quotes give col_offset of -1 on the last line of the string.
        if node.col_offset == -1:
            end_quote_match = re.compile("""['"]{3}""").search(self.file_contents, line_start_index)
            open_quote_index = self.file_contents.rfind(
                end_quote_match.group(), 0, end_quote_match.start()
            )
            if open_quote_index > 0:
                start_index = open_quote_index
            else:
                # If we can't find a starting quote, let's assume that what
                # we considered the end quote is really the start quote.
                start_index = end_quote_match.start()
        string = ParseString(self.file_contents, start_index, len(self.file_contents))
        return Expression(string.start_index, string.end_index)

    def visit_FunctionDef(self, node):
        """
        Skips processing of __repr__ functions, since these sometimes use '<'
        for non-HTML purposes.

        Arguments:
            node: An AST node.
        """
        if node.name == '__repr__':
            return
        self.generic_visit(node)
class HtmlStringVisitor(BaseVisitor):
    """
    Checks for strings that contain HTML. Assumes any string with < or > is
    considered potential HTML.

    To be used only with strings in context of format or concat.
    """

    def __init__(self, file_contents, results, skip_wrapped_html=False):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
            skip_wrapped_html: True if visitor should skip strings wrapped with
                HTML() or Text(), and False otherwise.
        """
        super(HtmlStringVisitor, self).__init__(file_contents, results)
        self.skip_wrapped_html = skip_wrapped_html
        self.unsafe_html_string_nodes = []
        self.over_escaped_entity_string_nodes = []
        self.has_text_or_html_call = False

    def visit_Str(self, node):
        """
        When strings are visited, checks if it contains HTML.

        Arguments:
            node: An AST node.
        """
        # Skips '<' (and '>') in regex named groups. For example, "(?P<group>)".
        has_tag_char = re.search('<', node.s) is not None
        in_named_group = re.search('[(][?]P<', node.s) is not None
        if has_tag_char and not in_named_group:
            self.unsafe_html_string_nodes.append(node)
        if re.search(r"&[#]?[a-zA-Z0-9]+;", node.s) is not None:
            self.over_escaped_entity_string_nodes.append(node)

    def visit_Call(self, node):
        """
        Skips processing of string contained inside HTML() and Text() calls when
        skip_wrapped_html is True.

        Arguments:
            node: An AST node.
        """
        is_html_or_text_call = (
            isinstance(node.func, ast.Name) and node.func.id in ('HTML', 'Text')
        )
        if self.skip_wrapped_html and is_html_or_text_call:
            self.has_text_or_html_call = True
        else:
            self.generic_visit(node)
class ContainsFormatVisitor(BaseVisitor):
    """
    Checks if there are any nested format() calls.

    This visitor is meant to be called on HTML() and Text() ast.Call nodes to
    search for any illegal nested format() calls.
    """
    def __init__(self, file_contents, results):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(ContainsFormatVisitor, self).__init__(file_contents, results)
        self.contains_format_call = False

    def visit_Attribute(self, node):
        """
        Flags any attribute access of ``format`` and stops descending once
        one has been found.

        Arguments:
            node: An AST node.
        """
        # Attribute(expr value, identifier attr, expr_context ctx)
        if node.attr != 'format':
            self.generic_visit(node)
        else:
            self.contains_format_call = True
class FormatInterpolateVisitor(BaseVisitor):
    """
    Checks if format() interpolates any HTML() or Text() calls. In other words,
    are Text() or HTML() calls nested inside the call to format().

    This visitor is meant to be called on a format() attribute node.
    """
    def __init__(self, file_contents, results):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(FormatInterpolateVisitor, self).__init__(file_contents, results)
        self.interpolates_text_or_html = False
        self.format_caller_node = None

    def visit_Call(self, node):
        """
        Checks all calls. Remembers the caller of the initial format() call, or
        in other words, the left-hand side of the call. Also tracks if HTML()
        or Text() calls were seen.

        Arguments:
            node: The AST root node.
        """
        # Use equality, not identity: `node.func.attr is 'format'` depended on
        # string interning and is not guaranteed to hold for equal strings.
        if isinstance(node.func, ast.Attribute) and node.func.attr == 'format':
            if self.format_caller_node is None:
                # Store the caller, or left-hand-side node of the initial
                # format() call.
                self.format_caller_node = node.func.value
        elif isinstance(node.func, ast.Name) and node.func.id in ['HTML', 'Text']:
            # Found Text() or HTML() call in arguments passed to format().
            self.interpolates_text_or_html = True
        self.generic_visit(node)

    def generic_visit(self, node):
        """
        Determines whether or not to continue to visit nodes according to the
        following rules:

        - Once a Text() or HTML() call has been found, stop visiting more nodes.
        - Skip the caller of the outer-most format() call, or in other words,
          the left-hand side of the call.

        Arguments:
            node: The AST root node.
        """
        if not self.interpolates_text_or_html:
            if self.format_caller_node is not node:
                super(FormatInterpolateVisitor, self).generic_visit(node)
class OuterFormatVisitor(BaseVisitor):
    """
    Only visits outer most Python format() calls. These checks are not repeated
    for any nested format() calls.

    This visitor is meant to be used once from the root.
    """
    def visit_Call(self, node):
        """
        Checks that format() calls which contain HTML() or Text() use HTML() or
        Text() as the caller. In other words, Text() or HTML() must be used
        before format() for any arguments to format() that contain HTML() or
        Text().

        Arguments:
            node: An AST node.
        """
        is_format_call = isinstance(node.func, ast.Attribute) and node.func.attr == 'format'
        if not is_format_call:
            self.generic_visit(node)
            return
        html_visitor = HtmlStringVisitor(self.file_contents, self.results, True)
        html_visitor.visit(node)
        for string_node in html_visitor.unsafe_html_string_nodes:
            self.results.violations.append(ExpressionRuleViolation(
                Rules.python_wrap_html, self.node_to_expression(string_node)
            ))
        # Do not continue processing child nodes of this format() node.
class AllNodeVisitor(BaseVisitor):
    """
    Visits all nodes and does not interfere with calls to generic_visit(). This
    is used in conjunction with other visitors to check for a variety of
    violations.

    This visitor is meant to be used once from the root.
    """
    def visit_Attribute(self, node):
        """
        Checks for uses of deprecated `display_name_with_default_escaped`.

        Arguments:
            node: An AST node.
        """
        if node.attr == 'display_name_with_default_escaped':
            violation = ExpressionRuleViolation(
                Rules.python_deprecated_display_name, self.node_to_expression(node)
            )
            self.results.violations.append(violation)
        self.generic_visit(node)

    def visit_Call(self, node):
        """
        Checks for a variety of violations:
        - Checks that format() calls with nested HTML() or Text() calls use
          HTML() or Text() on the left-hand side.
        - For each HTML() and Text() call, calls into separate visitor to check
          for inner format() calls.

        Arguments:
            node: An AST node.
        """
        func = node.func
        if isinstance(func, ast.Attribute) and func.attr == 'format':
            interpolate_visitor = FormatInterpolateVisitor(self.file_contents, self.results)
            interpolate_visitor.visit(node)
            if interpolate_visitor.interpolates_text_or_html:
                caller = func.value
                caller_is_html_or_text = (
                    isinstance(caller, ast.Call) and
                    isinstance(caller.func, ast.Name) and
                    caller.func.id in ['Text', 'HTML']
                )
                # If format call has nested Text() or HTML(), then the caller,
                # or left-hand-side of the format() call, must be a call to
                # Text() or HTML().
                if not caller_is_html_or_text:
                    self.results.violations.append(ExpressionRuleViolation(
                        Rules.python_requires_html_or_text, self.node_to_expression(func)
                    ))
        elif isinstance(func, ast.Name) and func.id in ['HTML', 'Text']:
            format_visitor = ContainsFormatVisitor(self.file_contents, self.results)
            format_visitor.visit(node)
            if format_visitor.contains_format_call:
                self.results.violations.append(ExpressionRuleViolation(
                    Rules.python_close_before_format, self.node_to_expression(func)
                ))
        self.generic_visit(node)

    def visit_BinOp(self, node):
        """
        Checks for concat using '+' and interpolation using '%' with strings
        containing HTML.
        """
        if isinstance(node.op, ast.Mod):
            rule = Rules.python_interpolate_html
        elif isinstance(node.op, ast.Add):
            rule = Rules.python_concat_html
        else:
            rule = None
        if rule is not None:
            found_html_string = False
            for operand in (node.left, node.right):
                # Create a fresh visitor per operand so state is not shared.
                html_visitor = HtmlStringVisitor(self.file_contents, self.results)
                html_visitor.visit(operand)
                if html_visitor.unsafe_html_string_nodes:
                    found_html_string = True
            if found_html_string:
                self.results.violations.append(ExpressionRuleViolation(
                    rule, self.node_to_expression(node)
                ))
        self.generic_visit(node)
class PythonLinter(BaseLinter):
    """
    The linter for Python files.

    The current implementation of the linter does naive Python parsing. It does
    not use the parser. One known issue is that parsing errors found inside a
    docstring need to be disabled, rather than being automatically skipped.
    Skipping docstrings is an enhancement that could be added.
    """
    LINE_COMMENT_DELIM = "#"

    def __init__(self):
        """
        Init method.
        """
        super(PythonLinter, self).__init__()
        self._skip_python_dirs = SKIP_DIRS + ('tests', 'test/acceptance')

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is a Python file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential Python file

        Returns:
            The file results containing any violations.
        """
        file_full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(file_full_path)
        if not results.is_file:
            return results
        if not file_name.lower().endswith('.py'):
            return results
        # skip tests.py files
        # TODO: Add configuration for files and paths
        if file_name.lower().endswith('tests.py'):
            return results
        # skip this linter code (i.e. safe_template_linter.py)
        if file_name == os.path.basename(__file__):
            return results
        if not self._is_valid_directory(self._skip_python_dirs, directory):
            return results
        return self._load_and_check_file_is_safe(file_full_path, self.check_python_file_is_safe, results)

    def check_python_file_is_safe(self, file_contents, results):
        """
        Checks for violations in a Python file.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        root_node = self.parse_python_code(file_contents, results)
        self.check_python_code_is_safe(file_contents, root_node, results)
        # Check rules specific to .py files only.
        # Note that in template files, the scope is different, so you can make
        # different assumptions.
        if root_node is not None:
            # check format() rules that can be run on outer-most format() calls
            visitor = OuterFormatVisitor(file_contents, results)
            visitor.visit(root_node)
        results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)

    def check_python_code_is_safe(self, python_code, root_node, results):
        """
        Checks for violations in Python code snippet. This can also be used for
        Python that appears in files other than .py files, like in templates.

        Arguments:
            python_code: The contents of the Python code.
            root_node: The root node of the Python code parsed by AST.
            results: A file results objects to which violations will be added.
        """
        if root_node is not None:
            # check illegal concatenation and interpolation
            visitor = AllNodeVisitor(python_code, results)
            visitor.visit(root_node)
        # check rules parsed with regex
        self._check_custom_escape(python_code, results)

    def parse_python_code(self, python_code, results):
        """
        Parses Python code.

        Arguments:
            python_code: The Python code to be parsed.
            results: A file results objects to which violations will be added
                when the code does not parse.

        Returns:
            The root node that was parsed, or None for SyntaxError.
        """
        python_code = self._strip_file_encoding(python_code)
        try:
            return ast.parse(python_code)
        except SyntaxError as e:
            if e.offset is None:
                expression = Expression(0)
            else:
                lines = StringLines(python_code)
                line_start_index = lines.line_number_to_start_index(e.lineno)
                expression = Expression(line_start_index + e.offset)
            results.violations.append(ExpressionRuleViolation(
                Rules.python_parse_error, expression
            ))
            return None

    def _strip_file_encoding(self, file_contents):
        """
        Removes file encoding from file_contents because the file was already
        read into Unicode, and the AST parser complains.

        Arguments:
            file_contents: The Python file contents.

        Returns:
            The Python file contents with the encoding stripped.
        """
        # PEP-263 Provides Regex for Declaring Encoding
        # Example: -*- coding: <encoding name> -*-
        # This is only allowed on the first two lines, and it must be stripped
        # before parsing, because we have already read into Unicode and the
        # AST parser complains.
        encoding_regex = re.compile(r"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
        encoding_match = encoding_regex.search(file_contents)
        # If encoding comment not found on first line, search second line.
        if encoding_match is None:
            lines = StringLines(file_contents)
            if lines.line_count() >= 2:
                encoding_match = encoding_regex.search(lines.line_number_to_line(2))
        # If encoding was found, strip it
        if encoding_match is not None:
            file_contents = file_contents.replace(encoding_match.group(), '#', 1)
        return file_contents

    def _check_custom_escape(self, file_contents, results):
        """
        Checks for custom escaping calls, rather than using a standard escaping
        method.

        Arguments:
            file_contents: The contents of the Python file
            results: A list of results into which violations will be added.
        """
        # NOTE(review): the previous pattern was "(<.*<|<.*<)" — its two
        # alternatives were identical, so the second branch was dead and has
        # been removed. Matching behavior is unchanged; confirm whether the
        # duplicate was meant to be a different pattern.
        for match in re.finditer("<.*<", file_contents):
            expression = Expression(match.start(), match.end())
            results.violations.append(ExpressionRuleViolation(
                Rules.python_custom_escape, expression
            ))
class MakoTemplateLinter(BaseLinter):
    """
    The linter for Mako template files.
    """
    LINE_COMMENT_DELIM = "##"

    def __init__(self):
        """
        Init method.
        """
        super(MakoTemplateLinter, self).__init__()
        self.javascript_linter = JavaScriptLinter()
        self.python_linter = PythonLinter()

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is a Mako template file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential Mako file

        Returns:
            The file results containing any violations.
        """
        mako_file_full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(mako_file_full_path)
        if not results.is_file:
            return results
        if not self._is_valid_directory(directory):
            return results
        # TODO: When safe-by-default is turned on at the platform level, will we:
        # 1. Turn it on for .html only, or
        # 2. Turn it on for all files, and have different rulesets that have
        #    different rules of .xml, .html, .js, .txt Mako templates (e.g. use
        #    the n filter to turn off h for some of these)?
        # For now, we only check .html and .xml files
        if not (file_name.lower().endswith('.html') or file_name.lower().endswith('.xml')):
            return results
        return self._load_and_check_file_is_safe(mako_file_full_path, self._check_mako_file_is_safe, results)

    def _is_valid_directory(self, directory):
        """
        Determines if the provided directory is a directory that could contain
        Mako template files that need to be linted.

        Arguments:
            directory: The directory to be linted.

        Returns:
            True if this directory should be linted for Mako template violations
            and False otherwise.
        """
        if is_skip_dir(SKIP_DIRS, directory):
            return False
        # TODO: This is an imperfect guess concerning the Mako template
        # directories. This needs to be reviewed before turning on safe by
        # default at the platform level.
        if ('/templates/' in directory) or directory.endswith('/templates'):
            return True
        return False

    def _check_mako_file_is_safe(self, mako_template, results):
        """
        Checks for violations in a Mako template.

        Arguments:
            mako_template: The contents of the Mako template.
            results: A file results objects to which violations will be added.
        """
        if self._is_django_template(mako_template):
            return
        has_page_default = self._has_page_default(mako_template, results)
        self._check_mako_expressions(mako_template, has_page_default, results)
        self._check_mako_python_blocks(mako_template, has_page_default, results)
        results.prepare_results(mako_template, line_comment_delim=self.LINE_COMMENT_DELIM)

    def _is_django_template(self, mako_template):
        """
        Determines if the template is actually a Django template.

        Arguments:
            mako_template: The template code.

        Returns:
            True if this is really a Django template, and False otherwise.
        """
        if re.search('({%.*%})|({{.*}})', mako_template) is not None:
            return True
        return False

    def _get_page_tag_count(self, mako_template):
        """
        Determines the number of page expressions in the Mako template. Ignores
        page expressions that are commented out.

        Arguments:
            mako_template: The contents of the Mako template.

        Returns:
            The number of page expressions
        """
        count = len(re.findall('<%page ', mako_template, re.IGNORECASE))
        count_commented = len(re.findall(r'##\s+<%page ', mako_template, re.IGNORECASE))
        return max(0, count - count_commented)

    def _has_page_default(self, mako_template, results):
        """
        Checks if the Mako template contains the page expression marking it as
        safe by default.

        Arguments:
            mako_template: The contents of the Mako template.
            results: A list of results into which violations will be added.

        Side effect:
            Adds violations regarding page default if necessary

        Returns:
            True if the template has the page default, and False otherwise.
        """
        page_tag_count = self._get_page_tag_count(mako_template)
        # check if there are too many page expressions
        if page_tag_count >= 2:
            results.violations.append(RuleViolation(Rules.mako_multiple_page_tags))
            return False
        # make sure there is exactly 1 page expression, excluding commented out
        # page expressions, before proceeding
        elif page_tag_count != 1:
            results.violations.append(RuleViolation(Rules.mako_missing_default))
            return False
        # check that safe by default (h filter) is turned on
        page_h_filter_regex = re.compile('<%page[^>]*expression_filter=(?:"h"|\'h\')[^>]*/>')
        page_match = page_h_filter_regex.search(mako_template)
        if not page_match:
            results.violations.append(RuleViolation(Rules.mako_missing_default))
        # Return an actual boolean, as documented, rather than leaking the
        # Match object (previous behavior) to callers that treat this as a flag.
        return page_match is not None

    def _check_mako_expressions(self, mako_template, has_page_default, results):
        """
        Searches for Mako expressions and then checks if they contain
        violations, including checking JavaScript contexts for JavaScript
        violations.

        Arguments:
            mako_template: The contents of the Mako template.
            has_page_default: True if the page is marked as default, False
                otherwise.
            results: A list of results into which violations will be added.
        """
        expressions = self._find_mako_expressions(mako_template)
        contexts = self._get_contexts(mako_template)
        self._check_javascript_contexts(mako_template, contexts, results)
        for expression in expressions:
            if expression.end_index is None:
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_unparseable_expression, expression
                ))
                continue
            context = self._get_context(contexts, expression.start_index)
            self._check_expression_and_filters(mako_template, expression, context, has_page_default, results)

    def _check_javascript_contexts(self, mako_template, contexts, results):
        """
        Lint the JavaScript contexts for JavaScript violations inside a Mako
        template.

        Arguments:
            mako_template: The contents of the Mako template.
            contexts: A list of context dicts with 'type' and 'index'.
            results: A list of results into which violations will be added.

        Side effect:
            Adds JavaScript violations to results.
        """
        # The start marker is None when no JavaScript block is currently open.
        javascript_start_index = None
        for context in contexts:
            if context['type'] == 'javascript':
                # Fix: compare against None. The previous `< 0` test raised
                # TypeError on Python 3 (None is not orderable with int).
                if javascript_start_index is None:
                    javascript_start_index = context['index']
            else:
                if javascript_start_index is not None:
                    javascript_end_index = context['index']
                    javascript_code = mako_template[javascript_start_index:javascript_end_index]
                    self._check_javascript_context(javascript_code, javascript_start_index, results)
                    javascript_start_index = None
        # Handle a JavaScript block still open at the end of the template.
        if javascript_start_index is not None:
            javascript_code = mako_template[javascript_start_index:]
            self._check_javascript_context(javascript_code, javascript_start_index, results)

    def _check_javascript_context(self, javascript_code, start_offset, results):
        """
        Lint a single JavaScript context for JavaScript violations inside a Mako
        template.

        Arguments:
            javascript_code: The template contents of the JavaScript context.
            start_offset: The offset of the JavaScript context inside the
                original Mako template.
            results: A list of results into which violations will be added.

        Side effect:
            Adds JavaScript violations to results.
        """
        javascript_results = FileResults("")
        self.javascript_linter.check_javascript_file_is_safe(javascript_code, javascript_results)
        self._shift_and_add_violations(javascript_results, start_offset, results)

    def _check_mako_python_blocks(self, mako_template, has_page_default, results):
        """
        Searches for Mako python blocks and checks if they contain
        violations.

        Arguments:
            mako_template: The contents of the Mako template.
            has_page_default: True if the page is marked as default, False
                otherwise.
            results: A list of results into which violations will be added.
        """
        # Finds Python blocks such as <% ... %>, skipping other Mako start tags
        # such as <%def> and <%page>.
        python_block_regex = re.compile(r'<%\s(?P<code>.*?)%>', re.DOTALL)
        for python_block_match in python_block_regex.finditer(mako_template):
            self._check_expression_python(
                python_code=python_block_match.group('code'),
                start_offset=(python_block_match.start() + len('<% ')),
                has_page_default=has_page_default,
                results=results
            )

    def _check_expression_python(self, python_code, start_offset, has_page_default, results):
        """
        Lint the Python inside a single Python expression in a Mako template.

        Arguments:
            python_code: The Python contents of an expression.
            start_offset: The offset of the Python content inside the original
                Mako template.
            has_page_default: True if the page is marked as default, False
                otherwise.
            results: A list of results into which violations will be added.

        Side effect:
            Adds Python violations to results.
        """
        python_results = FileResults("")
        # Dedent expression internals so it is parseable.
        # Note that the final columns reported could be off somewhat.
        adjusted_python_code = textwrap.dedent(python_code)
        # Use raw strings for the regex patterns to avoid invalid escape
        # sequence warnings on '\w'.
        first_letter_match = re.search(r'\w', python_code)
        adjusted_first_letter_match = re.search(r'\w', adjusted_python_code)
        if first_letter_match is not None and adjusted_first_letter_match is not None:
            start_offset += (first_letter_match.start() - adjusted_first_letter_match.start())
        python_code = adjusted_python_code
        root_node = self.python_linter.parse_python_code(python_code, python_results)
        self.python_linter.check_python_code_is_safe(python_code, root_node, python_results)
        # Check mako expression specific Python rules.
        if root_node is not None:
            visitor = HtmlStringVisitor(python_code, python_results, True)
            visitor.visit(root_node)
            for unsafe_html_string_node in visitor.unsafe_html_string_nodes:
                python_results.violations.append(ExpressionRuleViolation(
                    Rules.python_wrap_html, visitor.node_to_expression(unsafe_html_string_node)
                ))
            if has_page_default:
                for over_escaped_entity_string_node in visitor.over_escaped_entity_string_nodes:
                    python_results.violations.append(ExpressionRuleViolation(
                        Rules.mako_html_entities, visitor.node_to_expression(over_escaped_entity_string_node)
                    ))
        python_results.prepare_results(python_code, line_comment_delim=self.LINE_COMMENT_DELIM)
        self._shift_and_add_violations(python_results, start_offset, results)

    def _shift_and_add_violations(self, other_linter_results, start_offset, results):
        """
        Adds results from a different linter to the Mako results, after shifting
        the offset into the original Mako template.

        Arguments:
            other_linter_results: Results from another linter.
            start_offset: The offset of the linted code, a part of the template,
                inside the original Mako template.
            results: A list of results into which violations will be added.

        Side effect:
            Adds violations to results.
        """
        # translate the violations into the proper location within the original
        # Mako template
        for violation in other_linter_results.violations:
            expression = violation.expression
            expression.start_index += start_offset
            if expression.end_index is not None:
                expression.end_index += start_offset
            results.violations.append(ExpressionRuleViolation(violation.rule, expression))

    def _check_expression_and_filters(self, mako_template, expression, context, has_page_default, results):
        """
        Checks that the filters used in the given Mako expression are valid
        for the given context. Adds violation to results if there is a problem.

        Arguments:
            mako_template: The contents of the Mako template.
            expression: A Mako Expression.
            context: The context of the page in which the expression was found
                (e.g. javascript, html).
            has_page_default: True if the page is marked as default, False
                otherwise.
            results: A list of results into which violations will be added.
        """
        if context == 'unknown':
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_unknown_context, expression
            ))
            return
        # Example: finds "| n, h}" when given "${x | n, h}"
        filters_regex = re.compile(r'\|([.,\w\s]*)\}')
        filters_match = filters_regex.search(expression.expression)
        # Check Python code inside expression.
        if filters_match is None:
            python_code = expression.expression[2:-1]
        else:
            python_code = expression.expression[2:filters_match.start()]
        self._check_expression_python(python_code, expression.start_index + 2, has_page_default, results)
        # Check filters.
        if filters_match is None:
            if context == 'javascript':
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_invalid_js_filter, expression
                ))
            return
        filters = filters_match.group(1).replace(" ", "").split(",")
        if filters == ['n', 'decode.utf8']:
            # {x | n, decode.utf8} is valid in any context
            pass
        elif context == 'html':
            if filters == ['h']:
                if has_page_default:
                    # suppress this violation if the page default hasn't been set,
                    # otherwise the template might get less safe
                    results.violations.append(ExpressionRuleViolation(
                        Rules.mako_unwanted_html_filter, expression
                    ))
            else:
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_invalid_html_filter, expression
                ))
        elif context == 'javascript':
            self._check_js_expression_not_with_html(mako_template, expression, results)
            if filters == ['n', 'dump_js_escaped_json']:
                # {x | n, dump_js_escaped_json} is valid
                pass
            elif filters == ['n', 'js_escaped_string']:
                # {x | n, js_escaped_string} is valid, if surrounded by quotes
                self._check_js_string_expression_in_quotes(mako_template, expression, results)
            else:
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_invalid_js_filter, expression
                ))

    def _check_js_string_expression_in_quotes(self, mako_template, expression, results):
        """
        Checks that a Mako expression using js_escaped_string is surrounded by
        quotes.

        Arguments:
            mako_template: The contents of the Mako template.
            expression: A Mako Expression.
            results: A list of results into which violations will be added.
        """
        parse_string = self._find_string_wrapping_expression(mako_template, expression)
        if parse_string is None:
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_js_missing_quotes, expression
            ))

    def _check_js_expression_not_with_html(self, mako_template, expression, results):
        """
        Checks that a Mako expression in a JavaScript context does not appear in
        a string that also contains HTML.

        Arguments:
            mako_template: The contents of the Mako template.
            expression: A Mako Expression.
            results: A list of results into which violations will be added.
        """
        parse_string = self._find_string_wrapping_expression(mako_template, expression)
        if parse_string is not None and re.search('[<>]', parse_string.string) is not None:
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_js_html_string, expression
            ))

    def _find_string_wrapping_expression(self, mako_template, expression):
        """
        Finds the string wrapping the Mako expression if there is one.

        Arguments:
            mako_template: The contents of the Mako template.
            expression: A Mako Expression.

        Returns:
            ParseString representing a scrubbed version of the wrapped string,
            where the Mako expression was replaced with "${...}", if a wrapped
            string was found. Otherwise, returns None if none found.
        """
        lines = StringLines(mako_template)
        start_index = lines.index_to_line_start_index(expression.start_index)
        if expression.end_index is not None:
            end_index = lines.index_to_line_end_index(expression.end_index)
        else:
            return None
        # scrub out the actual expression so any code inside the expression
        # doesn't interfere with rules applied to the surrounding code (i.e.
        # checking JavaScript).
        scrubbed_lines = "".join((
            mako_template[start_index:expression.start_index],
            "${...}",
            mako_template[expression.end_index:end_index]
        ))
        adjusted_start_index = expression.start_index - start_index
        start_index = 0
        while True:
            parse_string = ParseString(scrubbed_lines, start_index, len(scrubbed_lines))
            # check for validly parsed string
            if 0 <= parse_string.start_index < parse_string.end_index:
                # check if expression is contained in the given string
                if parse_string.start_index < adjusted_start_index < parse_string.end_index:
                    return parse_string
                else:
                    # move to check next string
                    start_index = parse_string.end_index
            else:
                break
        return None

    def _get_contexts(self, mako_template):
        """
        Returns a data structure that represents the indices at which the
        template changes from HTML context to JavaScript and back.

        Return:
            A list of dicts where each dict contains:
            - index: the index of the context.
            - type: the context type (e.g. 'html' or 'javascript').
        """
        contexts_re = re.compile(
            r"""
                <script.*?> |                                # script tag start
                </script> |                                  # script tag end
                <%static:require_module(_async)?.*?> |       # require js script tag start (optionally the _async version)
                </%static:require_module(_async)?> |         # require js script tag end (optionally the _async version)
                <%block[ ]*name=['"]requirejs['"]\w*> |      # require js tag start
                </%block>                                    # require js tag end
            """,
            re.VERBOSE | re.IGNORECASE
        )
        media_type_re = re.compile(r"""type=['"].*?['"]""", re.IGNORECASE)
        contexts = [{'index': 0, 'type': 'html'}]
        javascript_types = [
            'text/javascript', 'text/ecmascript', 'application/ecmascript', 'application/javascript',
            'text/x-mathjax-config', 'json/xblock-args'
        ]
        html_types = ['text/template']
        for context in contexts_re.finditer(mako_template):
            match_string = context.group().lower()
            if match_string.startswith("<script"):
                match_type = media_type_re.search(match_string)
                context_type = 'javascript'
                if match_type is not None:
                    # get media type (e.g. get text/javascript from
                    # type="text/javascript")
                    match_type = match_type.group()[6:-1].lower()
                    if match_type in html_types:
                        context_type = 'html'
                    elif match_type not in javascript_types:
                        context_type = 'unknown'
                contexts.append({'index': context.end(), 'type': context_type})
            elif match_string.startswith("</"):
                contexts.append({'index': context.start(), 'type': 'html'})
            else:
                contexts.append({'index': context.end(), 'type': 'javascript'})
        return contexts

    def _get_context(self, contexts, index):
        """
        Gets the context (e.g. javascript, html) of the template at the given
        index.

        Arguments:
            contexts: A list of dicts where each dict contains the 'index' of the context
                and the context 'type' (e.g. 'html' or 'javascript').
            index: The index for which we want the context.

        Returns:
            The context (e.g. javascript or html) for the given index.
        """
        current_context = contexts[0]['type']
        for context in contexts:
            if context['index'] <= index:
                current_context = context['type']
            else:
                break
        return current_context

    def _find_mako_expressions(self, mako_template):
        """
        Finds all the Mako expressions in a Mako template and creates a list
        of dicts for each expression.

        Arguments:
            mako_template: The content of the Mako template.

        Returns:
            A list of Expressions.
        """
        start_delim = '${'
        start_index = 0
        expressions = []
        while True:
            start_index = mako_template.find(start_delim, start_index)
            if start_index < 0:
                break
            # If start of mako expression is commented out, skip it.
            uncommented_start_index = self._uncommented_start_index(mako_template, start_index)
            if uncommented_start_index != start_index:
                start_index = uncommented_start_index
                continue
            result = self._find_closing_char_index(
                start_delim, '{', '}', mako_template, start_index=start_index + len(start_delim)
            )
            if result is None:
                expression = Expression(start_index)
                # for parsing error, restart search right after the start of the
                # current expression
                start_index = start_index + len(start_delim)
            else:
                close_char_index = result['close_char_index']
                # (Removed a dead assignment that sliced the raw expression text
                # and immediately overwrote it with the Expression object.)
                expression = Expression(
                    start_index,
                    end_index=close_char_index + 1,
                    template=mako_template,
                    start_delim=start_delim,
                    end_delim='}',
                    strings=result['strings'],
                )
                # restart search after the current expression
                start_index = expression.end_index
            expressions.append(expression)
        return expressions
# Directory names (or repo-relative path fragments) that the linters should
# never descend into; matched against directory paths by is_skip_dir() below.
SKIP_DIRS = (
    '.git',
    '.pycharm_helpers',
    'common/static/xmodule/modules',
    'perf_tests',
    'node_modules',
    'reports/diff_quality',
    'scripts/tests/templates',
    'spec',
    'test_root',
    'vendor',
)
def is_skip_dir(skip_dirs, directory):
    """
    Determines whether a directory should be skipped or linted.

    Arguments:
        skip_dirs: The configured directories to be skipped.
        directory: The current directory to be tested.

    Returns:
        True if the directory should be skipped, and False otherwise.
    """
    for skip_dir in skip_dirs:
        # Anchor the pattern at the end (\Z) so a skip dir only matches whole
        # path components: without the end anchor, re.match would let a
        # directory like "special" be skipped because it starts with "spec".
        skip_dir_regex = re.compile(
            r"(.*/)*{}(/.*)*\Z".format(re.escape(skip_dir)))
        if skip_dir_regex.match(directory) is not None:
            return True
    return False
def _process_file(full_path, template_linters, options, summary_results, out):
"""
For each linter, lints the provided file. This means finding and printing
violations.
Arguments:
full_path: The full path of the file to lint.
template_linters: A list of linting objects.
options: A list of the options.
summary_results: A SummaryResults with a summary of the violations.
out: output file
"""
num_violations = 0
directory = os.path.dirname(full_path)
file_name = os.path.basename(full_path)
for template_linter in template_linters:
results = template_linter.process_file(directory, file_name)
results.print_results(options, summary_results, out)
def _process_os_dir(directory, files, template_linters, options, summary_results, out):
    """
    Lints every file of one directory, in case-insensitive name order.

    Arguments:
        directory: Directory being linted.
        files: All files in the directory to be linted.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    for file_name in sorted(files, key=str.lower):
        _process_file(
            os.path.join(directory, file_name),
            template_linters, options, summary_results, out
        )
def _process_os_dirs(starting_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints all the directories in the starting directory.

    Arguments:
        starting_dir: The initial directory to begin the walk.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    for root, dirs, files in os.walk(starting_dir):
        if is_skip_dir(SKIP_DIRS, root):
            # Fix: prune the walk in place. The previous ``del dirs`` only
            # unbound the local name, so os.walk still descended into every
            # skipped tree (each subdirectory was then re-matched and its
            # files skipped, but the traversal cost remained). Clearing the
            # list in place is how os.walk(topdown=True) prunes subtrees.
            dirs[:] = []
            continue
        dirs.sort(key=lambda s: s.lower())
        _process_os_dir(root, files, template_linters, options, summary_results, out)
def _lint(file_or_dir, template_linters, options, summary_results, out):
    """
    Lints a single file, or recursively lints a directory tree.

    Arguments:
        file_or_dir: The file or initial directory to lint (None means the
            current working directory).
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    if file_or_dir is not None and os.path.isfile(file_or_dir):
        _process_file(file_or_dir, template_linters, options, summary_results, out)
    else:
        directory = "."
        if file_or_dir is not None:
            # A non-None argument that is not a file must be an existing
            # directory; anything else is a caller error.
            if not os.path.exists(file_or_dir):
                raise ValueError("Path [{}] is not a valid file or directory.".format(file_or_dir))
            directory = file_or_dir
        _process_os_dirs(directory, template_linters, options, summary_results, out)
    summary_results.print_results(options, out)
def main():
    """
    Command-line entry point for the safe template linter.

    Parses the options, runs every linter over the requested path (or the
    current directory) and prints all violations. Use --help for usage.
    """
    # pylint: disable=line-too-long
    epilog = (
        "For more help using the safe template linter, including details on how\n"
        "to understand and fix any violations, read the docs here:\n"
        "\n"
        "  http://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/safe_templates.html#safe-template-linter\n"
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Checks that templates are safe.',
        epilog=epilog,
    )
    parser.add_argument(
        '--list-files', dest='list_files', action='store_true',
        help='Only display the filenames that contain violations.'
    )
    parser.add_argument(
        '--rule-totals', dest='rule_totals', action='store_true',
        help='Display the totals for each rule.'
    )
    parser.add_argument(
        '--verbose', dest='verbose', action='store_true',
        help='Print multiple lines where possible for additional context of violations.'
    )
    parser.add_argument('path', nargs="?", default=None, help='A file to lint or directory to recursively lint.')
    args = parser.parse_args()
    # Collapse the parsed flags into the plain dict the linters expect.
    options = {flag: getattr(args, flag) for flag in ('list_files', 'rule_totals', 'verbose')}
    template_linters = [MakoTemplateLinter(), UnderscoreTemplateLinter(), JavaScriptLinter(), PythonLinter()]
    _lint(args.path, template_linters, options, SummaryResults(), out=sys.stdout)
# Run the linter when this module is executed directly as a script.
if __name__ == "__main__":
    main()
| Learningtribes/edx-platform | scripts/safe_template_linter.py | Python | agpl-3.0 | 101,020 | [
"VisIt"
] | 1b6f1111fce8bb6cefbcc3d3f94c341a16bbeebcf70d0bb59948ab4b0c6ec37d |
#*******************************************************************
# * File: merge.py
# * Description:
# * Author: HarshaRani
# * E-mail: hrani@ncbs.res.in
# ********************************************************************/
# **********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment,
#** also known as GENESIS 3 base code.
#** copyright (C) 2003-2017 Upinder S. Bhalla. and NCBS
#Created : Friday Dec 16 23:19:00 2016(+0530)
#Version
#Last-Updated: Tuesday Feb 28 15:05:33 2017(+0530)
# By: Harsha
#**********************************************************************/
# This program is used to merge models from src to destination
#Rules are :
# -- If a compartment from the src model doesn't exist in the destination model,
#    then the entire compartment and its children are copied over, including groups
# -- Models are merged at group level (if groups exist)
#    (a Group is a Neutral object in moose, which may represent a pathway in a network model)
# -- Pools are copied from source to destination if they don't exist; if they exist, nothing is done
# -- Reactions (Reac) and Enzymes (Enz) are copied
# --- if any dangling Reac or Enz exists then it is not copied
#
# --- if Reac Name's is different for a given path (group level)
# then copy the entire Reac along with substrate/product
# --- if same Reac Name and same sub and prd then nothing is copied
# --- if same Reac Name but sub or prd is different then duplicated and copied
#
# --- if Enz Name's is different for a given parent pool path
# then copy the entire Enz along with substrate/product
# --- if same Enz Name and same sub and prd then nothing is copied
# --- if same Enz Name but sub or prd is different then duplicated and copied
# -- Function are copied only if destination pool to which its suppose to connect doesn't exist with function of its own
#
import sys
import os
#from . import _moose as moose
import moose
import mtypes
from moose.chemUtil.chemConnectUtil import *
from moose.chemUtil.graphUtils import *
def mergeChemModel(src,des):
    """ Merge the chemical model at ``src`` into the model at ``des``.

    Each argument may be either a model file on disk (loaded via
    ``loadModels``) or the moose path of an already-loaded model.
    Compartments, pools, reactions, enzymes and functions present in the
    source but missing from the destination are copied over, and a textual
    report of everything duplicated, skipped or dangling is printed.
    """
    A = src
    B = des
    loadedA = False
    loadedB = False
    # Resolve the source: load from file, or accept an existing moose path.
    if os.path.isfile(A):
        modelA,loadedA = loadModels(A)
    elif moose.exists(A):
        modelA = A
        loadedA = True
    else:
        print ("%s path or file doesnot exists. Mergering will exist" % (A))
        exit(0)
    # Resolve the destination in the same way.
    if os.path.isfile(B):
        modelB,loadedB = loadModels(B)
    elif moose.exists(B):
        modelB = B
        loadedB = True
    else:
        print ("%s path or file doesnot exists. Mergering will exist " % (B))
        exit(0)
    if loadedA and loadedB:
        ## yet deleteSolver is called to make sure all the moose object are off from solver
        deleteSolver(modelA)
        deleteSolver(modelB)
        global poolListina
        poolListina = {}
        grpNotcopiedyet = []
        dictComptA = dict( [ (i.name,i) for i in moose.wildcardFind(modelA+'/##[ISA=ChemCompt]') ] )
        dictComptB = dict( [ (i.name,i) for i in moose.wildcardFind(modelB+'/##[ISA=ChemCompt]') ] )
        poolNotcopiedyet = []
        for key in list(dictComptA.keys()):
            if key not in dictComptB:
                # if compartmentname from modelB does not exist in modelA, then copy
                copy = moose.copy(dictComptA[key],moose.element(modelB))
            else:
                #if compartmentname from modelB exist in modelA,
                #volume is not same, then change volume of ModelB same as ModelA
                if abs(dictComptB[key].volume - dictComptA[key].volume):
                    #hack for now
                    while (abs(dictComptB[key].volume - dictComptA[key].volume) != 0.0):
                        dictComptA[key].volume = float(dictComptB[key].volume)
                    dictComptB = dict( [ (i.name,i) for i in moose.wildcardFind(modelB+'/##[ISA=ChemCompt]') ] )
                #Mergering pool
                poolMerge(dictComptB[key],dictComptA[key],poolNotcopiedyet)
        if grpNotcopiedyet:
            # objA = moose.element(comptA).parent.name
            # if not moose.exists(objA+'/'+comptB.name+'/'+bpath.name):
            #     print bpath
            #     moose.copy(bpath,moose.element(objA+'/'+comptB.name))
            pass
        comptBdict = comptList(modelB)
        poolListinb = {}
        poolListinb = updatePoolList(comptBdict)
        R_Duplicated, R_Notcopiedyet,R_Daggling = [], [], []
        E_Duplicated, E_Notcopiedyet,E_Daggling = [], [], []
        # NOTE(review): only the lists from the final loop iteration are
        # reported below — earlier compartments' results are overwritten.
        for key in list(dictComptA.keys()):
            funcExist, funcNotallowed = [], []
            funcExist,funcNotallowed = functionMerge(dictComptB,dictComptA,key)
            # Refresh the cached pool list after each copy pass so that the
            # following merges can see newly created pools.
            poolListinb = updatePoolList(dictComptB)
            R_Duplicated,R_Notcopiedyet,R_Daggling = reacMerge(dictComptB,dictComptA,key,poolListinb)
            poolListinb = updatePoolList(dictComptB)
            E_Duplicated,E_Notcopiedyet,E_Daggling = enzymeMerge(dictComptB,dictComptA,key,poolListinb)
        path, sfile = os.path.split(src)
        path, dfile = os.path.split(des)
        print("\n %s (src) model is merged to %s (des)" %(sfile, dfile))
        if funcExist:
            print( "\nIn model \"%s\" pool already has connection from a function, these function from model \"%s\" is not allowed to connect to same pool,\n since no two function are allowed to connect to same pool:"%(dfile, sfile))
            for fl in list(funcExist):
                print("\t [Pool]: %s [Function]: %s \n" %(str(fl.parent.name), str(fl.path)))
        if funcNotallowed:
            print( "\nThese functions is not to copied, since pool connected to function input are from different compartment:")
            for fl in list(funcNotallowed):
                print("\t [Pool]: %s [Function]: %s \n" %(str(fl.parent.name), str(fl.path)))
        if R_Duplicated or E_Duplicated:
            print ("These Reaction / Enzyme are \"Duplicated\" into destination file \"%s\", due to "
                   "\n 1. If substrate / product name's are different for a give reaction/Enzyme name "
                   "\n 2. If product belongs to different compartment "
                   "\n Models have to decide to keep or delete these reaction/enzyme in %s" %(dfile, dfile))
            # Fix: this guard previously tested E_Duplicated, so duplicated
            # reactions were only listed when enzymes happened to be
            # duplicated too. It must test the list it prints.
            if R_Duplicated:
                print("Reaction: ")
                for rd in list(R_Duplicated):
                    print ("%s " %str(rd.name))
            if E_Duplicated:
                print ("Enzyme:")
                for ed in list(E_Duplicated):
                    print ("%s " %str(ed.name))
        if R_Notcopiedyet or E_Notcopiedyet:
            print ("\nThese Reaction/Enzyme in model are not dagging but while copying the associated substrate or product is missing")
            if R_Notcopiedyet:
                print("Reaction: ")
                for rd in list(R_Notcopiedyet):
                    print ("%s " %str(rd.name))
            if E_Notcopiedyet:
                print ("Enzyme:")
                for ed in list(E_Notcopiedyet):
                    print ("%s " %str(ed.name))
        if R_Daggling or E_Daggling:
            print ("\n Daggling reaction/enzyme are not allowed in moose, these are not merged to %s from %s" %(dfile, sfile))
            if R_Daggling:
                print("Reaction: ")
                for rd in list(R_Daggling):
                    print ("%s " %str(rd.name))
            if E_Daggling:
                print ("Enzyme:")
                for ed in list(E_Daggling):
                    print ("%s " %str(ed.name))
def functionMerge(comptA,comptB,key):
    """Copy Function objects of compartment ``key`` from model B into model A.

    Here ``comptA`` is the destination compartment dict and ``comptB`` the
    source (the caller passes them in that order).  A function is copied
    only if the destination pool it drives exists, is not already driven by
    another function, and all of the function's inputs live in a single
    compartment (moose allows only one function per pool, and inputs from
    one compartment).

    Returns (funcExist, funcNotallowed): functions skipped because the
    target pool already has a function, and functions skipped because their
    inputs span multiple compartments.
    """
    funcNotallowed, funcExist = [], []
    comptApath = moose.element(comptA[key]).path
    comptBpath = moose.element(comptB[key]).path
    # Parent names are used to translate element paths from model B to A.
    objA = moose.element(comptApath).parent.name
    objB = moose.element(comptBpath).parent.name
    #This will give us all the function which exist in modelB
    funcListinb = moose.wildcardFind(comptBpath+'/##[ISA=Function]')
    for fb in funcListinb:
        #This will give us all the pools that its connected to, for this function
        fvalueOut = moose.element(fb).neighbors['valueOut']
        for poolinB in fvalueOut:
            poolinBpath = poolinB.path
            # Corresponding pool path in model A, by name substitution.
            poolinA = poolinBpath.replace(objB,objA)
            connectionexist = []
            if moose.exists(poolinA):
                #This is give us if pool which is to be connected already exist any connection
                connectionexist = moose.element(poolinA).neighbors['setN']+moose.element(poolinA).neighbors['setConc']+ moose.element(poolinA).neighbors['increment']
                if len(connectionexist) == 0:
                    #This pool in model A doesnot exist with any function
                    inputs = moose.element(fb.path+'/x').neighbors['input']
                    volumes = []
                    for ins in inputs:
                        volumes.append((findCompartment(moose.element(ins))).volume)
                    if len(set(volumes)) == 1:
                        # If all the input connected belongs to one compartment then copy
                        createFunction(fb,poolinA,objB,objA)
                    else:
                        # moose doesn't allow function's input to come from different compartment
                        funcNotallowed.append(fb)
                else:
                    #Pool in model 'A' already exist function "
                    funcExist.append(fb)
            else:
                print(" Path in model A doesn't exists %s" %(poolinA))
    return funcExist,funcNotallowed
def createFunction(fb,setpool,objB,objA):
    """Create (or reuse) in model A the Function mirroring ``fb`` from model B.

    ``setpool`` is the path of the destination pool whose 'setN' the new
    function drives.  ``objB``/``objA`` are the model root names used to
    translate paths between the two models.  When the destination function
    has no inputs yet, each of ``fb``'s inputs is re-created by appending
    an ``xN`` variable to the expression and connecting the matching pool.
    """
    fapath1 = fb.path.replace(objB,objA)
    fapath = fapath1.replace('[0]','')
    if not moose.exists(fapath):
        # if fb.parent.className in ['CubeMesh','CyclMesh']:
        #     des = moose.Function('/'+objA+'/'+fb.parent.name+'/'+fb.name)
        # elif fb.parent.className in ['Pool','ZombiePool','BufPool','ZombieBufPool']:
        #     for akey in list(poolListina[findCompartment(fb).name]):
        #         if fb.parent.name == akey.name:
        #             des = moose.Function(akey.path+'/'+fb.name)
        des = moose.Function(fapath)
    else:
        des = moose.element(fapath)
    inputB = moose.element(fb.path+'/x').neighbors["input"]
    moose.connect(des, 'valueOut', moose.element(setpool),'setN' )
    inputA = []
    inputA = moose.element(fapath+'/x').neighbors["input"]
    # Only wire inputs when the destination function has none, so an
    # already-populated function is not double-connected.
    if not inputA:
        for src in inputB:
            # Translate the source pool's path into model A.
            pool = ((src.path).replace(objB,objA)).replace('[0]','')
            numVariables = des.numVars
            expr = ""
            # Extend the expression with the next free variable slot.
            expr = (des.expr+'+'+'x'+str(numVariables))
            expr = expr.lstrip("0 +")
            expr = expr.replace(" ","")
            des.expr = expr
            moose.connect( pool, 'nOut', des.x[numVariables], 'input' )
def comptList(modelpath):
    """Map compartment name -> compartment element for every ChemCompt under ``modelpath``."""
    return {
        compt.name: compt
        for compt in moose.wildcardFind(modelpath + '/##[ISA=ChemCompt]')
    }
def loadModels(filepath):
    """Load a chemical model into moose, or pass through an existing moose path.

    For a file on disk, only kkit/cspace files are actually loaded (SBML
    support is commented out); the model is placed at ``/<basename>``.
    For an existing moose path the path itself is returned.

    Returns (modelpath, loaded) where ``loaded`` is True on success.
    """
    modelpath = '/'
    loaded = False
    if os.path.isfile(filepath) :
        fpath, filename = os.path.split(filepath)
        # print " head and tail ",head, " ",tail
        # modelpath = filename[filename.rfind('/'): filename.rfind('.')]
        # print "modelpath ",modelpath
        # ext = os.path.splitext(filename)[1]
        # filename = filename.strip()
        # Model is mounted at '/<file name without extension>'.
        modelpath = '/'+filename[:filename.rfind('.')]
        modeltype = mtypes.getType(filepath)
        subtype = mtypes.getSubtype(filepath, modeltype)
        if subtype == 'kkit' or modeltype == "cspace":
            moose.loadModel(filepath,modelpath)
            loaded = True
        elif subtype == 'sbml':
            #moose.mooseReadSBML(filename,modelpath)
            #loaded = True
            pass
        else:
            print("This file is not supported for mergering")
            modelpath = moose.Shell('/')
    elif moose.exists(filepath):
        modelpath = filepath
        loaded = True
    return modelpath,loaded
def deleteSolver(modelRoot):
    """Detach solvers: delete every stoich (and its ksolve) found under ``modelRoot``."""
    for compartment in moose.wildcardFind(modelRoot + '/##[ISA=ChemCompt]'):
        stoich_path = compartment.path + '/stoich'
        if moose.exists(stoich_path):
            stoich = moose.element(stoich_path)
            # Grab the ksolve handle before deleting the stoich that owns it.
            ksolve = stoich.ksolve
            moose.delete(stoich)
            if moose.exists(ksolve.path):
                moose.delete(ksolve)
def poolMerge(comptA,comptB,poolNotcopiedyet):
    """Copy pools of compartment ``comptB`` (source) into ``comptA`` (destination).

    Groups (Neutral elements) are mirrored first — a name clash with a
    different className gets a ``_grp`` suffix.  Pools whose name is absent
    from the matching destination group are copied; pools living under an
    enzyme (cplx pools) cannot be copied yet and are collected into
    ``poolNotcopiedyet`` for later handling.
    """
    # Work on each group (Neutral) plus the compartment itself.
    aCmptGrp = moose.wildcardFind(comptA.path+'/#[TYPE=Neutral]')
    aCmptGrp = aCmptGrp +(moose.element(comptA.path),)
    bCmptGrp = moose.wildcardFind(comptB.path+'/#[TYPE=Neutral]')
    bCmptGrp = bCmptGrp +(moose.element(comptB.path),)
    objA = moose.element(comptA.path).parent.name
    objB = moose.element(comptB.path).parent.name
    for bpath in bCmptGrp:
        # Destination path for this group, translated from model B to A.
        grp_cmpt = ((bpath.path).replace(objB,objA)).replace('[0]','')
        if moose.exists(grp_cmpt) :
            if moose.element(grp_cmpt).className != bpath.className:
                # Same name but different element type: keep both by renaming.
                grp_cmpt = grp_cmpt+'_grp'
                bpath.name = bpath.name+"_grp"
                l = moose.Neutral(grp_cmpt)
        else:
            moose.Neutral(grp_cmpt)
        apath = moose.element(bpath.path.replace(objB,objA))
        bpoollist = moose.wildcardFind(bpath.path+'/#[ISA=PoolBase]')
        apoollist = moose.wildcardFind(apath.path+'/#[ISA=PoolBase]')
        for bpool in bpoollist:
            if bpool.name not in [apool.name for apool in apoollist]:
                copied = copy_deleteUnlyingPoolObj(bpool,apath)
                if copied == False:
                    #hold it for later, this pool may be under enzyme, as cplx
                    poolNotcopiedyet.append(bpool)
def copy_deleteUnlyingPoolObj(pool,path):
    """Copy ``pool`` under ``path`` unless it is an enzyme-complex pool.

    Any functions or enzymes dragged along by the copy are deleted so that
    dangling functions/enzymes are not introduced; they are merged
    explicitly elsewhere.  Returns True when the pool was copied.
    """
    # check if this pool is under compartement or under enzyme?(which is enzyme_cplx)
    # if enzyme_cplx then don't copy untill this perticular enzyme is copied
    # case: This enzyme_cplx might exist in modelA if enzyme exist
    # which will automatically copie's the pool
    copied = False
    if pool.parent.className not in ["Enz","ZombieEnz","MMenz","ZombieMMenz"]:
        poolcopied = moose.copy(pool,path)
        copied = True
        # deleting function and enzyme which gets copied if exist under pool
        # This is done to ensure daggling function / enzyme not copied.
        funclist = []
        for types in ['setConc','setN','increment']:
            funclist.extend(moose.element(poolcopied).neighbors[types])
        for fl in funclist:
            moose.delete(fl)
        enzlist = moose.element(poolcopied).neighbors['reac']
        for el in list(set(enzlist)):
            moose.delete(el.path)
    return copied
def updatePoolList(comptAdict):
    """Refresh the module-level ``poolListina`` cache from a compartment dict.

    For every compartment in ``comptAdict``, stores the current list of
    PoolBase elements under that compartment, keyed by compartment name.
    Returns the (shared, mutated) mapping.
    """
    for compt_name, compt in comptAdict.items():
        poolListina[compt_name] = moose.wildcardFind(compt.path + '/##[ISA=PoolBase]')
    return poolListina
def enzymeMerge(comptA,comptB,key,poolListina):
    """Merge enzymes of compartment ``key`` from model B (source) into model A.

    For each enzyme in B attached to a parent pool that exists in A:
      * copied when no same-named enzyme exists there and all of its
        substrates/products already exist in A;
      * duplicated (renamed ``*_duplicated``) when a same-named enzyme exists
        but substrates, products or their compartment volumes differ;
      * otherwise collected as not-copied-yet (sub/prd missing, possibly an
        enzyme-complex) or dangling (no sub/prd at all).

    Returns (RE_Duplicated, RE_Notcopiedyet, RE_Daggling).
    NOTE(review): the ``poolListina`` parameter shadows the module-level
    global of the same name; the parameter is what is read here.
    """
    war_msg = ""
    RE_Duplicated, RE_Notcopiedyet, RE_Daggling = [], [], []
    comptApath = moose.element(comptA[key]).path
    comptBpath = moose.element(comptB[key]).path
    # Model root names, used for B->A path translation.
    objA = moose.element(comptApath).parent.name
    objB = moose.element(comptBpath).parent.name
    enzyListina = moose.wildcardFind(comptApath+'/##[ISA=EnzBase]')
    enzyListinb = moose.wildcardFind(comptBpath+'/##[ISA=EnzBase]')
    for eb in enzyListinb:
        eBsubname, eBprdname = [],[]
        eBsubname = subprdList(eb,"sub")
        eBprdname = subprdList(eb,"prd")
        allexists, allexistp = False, False
        allclean = False
        poolinAlist = poolListina[findCompartment(eb).name]
        for pA in poolinAlist:
            if eb.parent.name == pA.name:
                eapath = eb.parent.path.replace(objB,objA)
                if not moose.exists(eapath+'/'+eb.name):
                    #This will take care
                    # -- If same enzparent name but different enzyme name
                    # -- or different parent/enzyme name
                    if eBsubname and eBprdname:
                        allexists = checkexist(eBsubname,objB,objA)
                        allexistp = checkexist(eBprdname,objB,objA)
                        if allexists and allexistp:
                            enzPool = moose.element(pA.path)
                            eapath = eb.parent.path.replace(objB,objA)
                            enz = moose.element(moose.copy(eb,moose.element(eapath)))
                            enzPool = enz.parent
                            # Wiring differs for explicit-complex vs MM enzymes.
                            if eb.className in ["ZombieEnz","Enz"]:
                                moose.connect(moose.element(enz),"enz",enzPool,"reac")
                            if eb.className in ["ZombieMMenz","MMenz"]:
                                moose.connect(enzPool,"nOut",enz,"enzDest")
                            connectObj(enz,eBsubname,"sub",comptA,war_msg)
                            connectObj(enz,eBprdname,"prd",comptA,war_msg)
                            allclean = True
                        else:
                            # didn't find sub or prd for this Enzyme
                            RE_Notcopiedyet.append(eb)
                    else:
                        # -- it is dagging reaction
                        RE_Daggling.append(eb)
                else:
                    #Same Enzyme name
                    # -- Same substrate and product including same volume then don't copy
                    # -- different substrate/product or if sub/prd's volume is different then DUPLICATE the Enzyme
                    allclean = False
                    ea = moose.element(eb.path.replace(objB,objA))
                    eAsubname = subprdList(ea,"sub")
                    eBsubname = subprdList(eb,"sub")
                    hasSamenoofsublen,hasSameS,hasSamevols = same_len_name_vol(eAsubname,eBsubname)
                    eAprdname = subprdList(ea,"prd")
                    eBprdname = subprdList(eb,"prd")
                    hasSamenoofprdlen,hasSameP,hasSamevolp = same_len_name_vol(eAprdname,eBprdname)
                    if not all((hasSamenoofsublen,hasSameS,hasSamevols,hasSamenoofprdlen,hasSameP,hasSamevolp)):
                        # May be different substrate or product or volume of Sub/prd may be different,
                        # Duplicating the enzyme
                        if eBsubname and eBprdname:
                            allexists,allexistp = False,False
                            allexists = checkexist(eBsubname,objB,objA)
                            allexistp = checkexist(eBprdname,objB,objA)
                            if allexists and allexistp:
                                # Renaming eb also renames it in the source model.
                                eb.name = eb.name+"_duplicated"
                                if eb.className in ["ZombieEnz","Enz"]:
                                    eapath = eb.parent.path.replace(objB,objA)
                                    enz = moose.copy(eb,moose.element(eapath))
                                    moose.connect(enz, 'enz', eapath, 'reac' )
                                if eb.className in ["ZombieMMenz","MMenz"]:
                                    eapath = eb.parent.path.replace(objB,objA)
                                    enz = moose.copy(eb.name,moose.element(eapath))
                                    enzinfo = moose.Annotator(enz.path+'/info')
                                    moose.connect(moose.element(enz).parent,"nOut",moose.element(enz),"enzDest")
                                #moose.connect(moose.element(enz),"enz",moose.element(enz).parent,"reac")
                                connectObj(enz,eBsubname,"sub",comptA,war_msg)
                                connectObj(enz,eBprdname,"prd",comptA,war_msg)
                                RE_Duplicated.append(enz)
                                allclean = True
                            else:
                                allclean = False
                    else:
                        allclean = True
                    if not allclean:
                        # didn't find sub or prd for this enzyme
                        # -- it may be connected Enzyme cplx
                        if eBsubname and eBprdname:
                            RE_Notcopiedyet.append(eb)
                        else:
                            RE_Daggling.append(eb)
    return RE_Duplicated,RE_Notcopiedyet,RE_Daggling
def reacMerge(comptA,comptB,key,poolListina):
    """Merge reactions of compartment ``key`` from model B (source) into model A.

    A reaction is copied when its name is absent from A (and all of its
    substrates/products exist there), duplicated (renamed ``*_duplicated``)
    when a same-named reaction exists with different substrates, products or
    volumes, and otherwise recorded as not-copied-yet or dangling.

    Returns (RE_Duplicated, RE_Notcopiedyet, RE_Daggling).
    NOTE(review): the ``poolListina`` parameter shadows the module-level
    global, and the loop below rebinds the ``key`` parameter.
    """
    RE_Duplicated, RE_Notcopiedyet, RE_Daggling = [], [], []
    war_msg = ""
    comptApath = moose.element(comptA[key]).path
    comptBpath = moose.element(comptB[key]).path
    # Model root names, used for B->A path translation.
    objA = moose.element(comptApath).parent.name
    objB = moose.element(comptBpath).parent.name
    reacListina = moose.wildcardFind(comptApath+'/##[ISA=ReacBase]')
    reacListinb = moose.wildcardFind(comptBpath+'/##[ISA=ReacBase]')
    for rb in reacListinb:
        rBsubname, rBprdname = [],[]
        rBsubname = subprdList(rb,"sub")
        rBprdname = subprdList(rb,"prd")
        allexists, allexistp = False, False
        allclean = False
        if rb.name not in [ra.name for ra in reacListina]:
            # reaction name not found then copy
            # And assuming that pools are copied earlier EXPECT POOL CPLX
            #To be assured the it takes correct compartment name incase reaction sub's
            #belongs to different compt
            key = findCompartment(rb).name
            if rBsubname and rBprdname:
                allexists = checkexist(rBsubname,objB,objA)
                allexistp = checkexist(rBprdname,objB,objA)
                if allexists and allexistp:
                    rapath = rb.parent.path.replace(objB,objA)
                    reac = moose.copy(rb,moose.element(rapath))
                    connectObj(reac,rBsubname,"sub",comptA,war_msg)
                    connectObj(reac,rBprdname,"prd",comptA,war_msg)
                    allclean = True
                else:
                    # didn't find sub or prd for this reaction
                    # -- it may be connected Enzyme cplx
                    RE_Notcopiedyet.append(rb)
            else:
                # -- it is dagging reaction
                RE_Daggling.append(rb)
                #print ("This reaction \""+rb.path+"\" has no substrate/product daggling reaction are not copied")
                #war_msg = war_msg+"\nThis reaction \""+rb.path+"\" has no substrate/product daggling reaction are not copied"
        else:
            #Same reaction name
            # -- Same substrate and product including same volume then don't copy
            # -- different substrate/product or if sub/prd's volume is different then DUPLICATE the reaction
            allclean = False
            for ra in reacListina:
                if rb.name == ra.name:
                    rAsubname = subprdList(ra,"sub")
                    rBsubname = subprdList(rb,"sub")
                    hasSamenoofsublen,hasSameS,hasSamevols = same_len_name_vol(rAsubname,rBsubname)
                    rAprdname = subprdList(ra,"prd")
                    rBprdname = subprdList(rb,"prd")
                    hasSamenoofprdlen,hasSameP,hasSamevolp = same_len_name_vol(rAprdname,rBprdname)
                    if not all((hasSamenoofsublen,hasSameS,hasSamevols,hasSamenoofprdlen,hasSameP,hasSamevolp)):
                        # May be different substrate or product or volume of Sub/prd may be different,
                        # Duplicating the reaction
                        if rBsubname and rBprdname:
                            allexists,allexistp = False,False
                            allexists = checkexist(rBsubname,objB,objA)
                            allexistp = checkexist(rBprdname,objB,objA)
                            if allexists and allexistp:
                                # Renaming rb also renames it in the source model.
                                rb.name = rb.name+"_duplicated"
                                #reac = moose.Reac(comptA[key].path+'/'+rb.name+"_duplicated")
                                rapath = rb.parent.path.replace(objB,objA)
                                reac = moose.copy(rb,moose.element(rapath))
                                connectObj(reac,rBsubname,"sub",comptA,war_msg)
                                connectObj(reac,rBprdname,"prd",comptA,war_msg)
                                RE_Duplicated.append(reac)
                                allclean = True
                            else:
                                allclean = False
                    else:
                        allclean = True
                    if not allclean:
                        # didn't find sub or prd for this reaction
                        # -- it may be connected Enzyme cplx
                        if rBsubname and rBprdname:
                            RE_Notcopiedyet.append(rb)
                        else:
                            RE_Daggling.append(rb)
    return RE_Duplicated,RE_Notcopiedyet,RE_Daggling
def subprdList(reac,subprd):
    """Return the moose elements wired to ``reac`` on the given side ("sub" or "prd")."""
    connected = moose.element(reac).neighbors[subprd]
    return [moose.element(item) for item in connected]
def same_len_name_vol(rA,rB):
    """Compare two substrate/product element lists from models A and B.

    Returns a tuple (same_length, same_names, same_volumes).  Each flag is
    only evaluated when the previous one holds; otherwise it stays False.
    Volume equality is an exact float comparison (truthiness of
    ``abs(a - b)``), so nearly-equal volumes count as different.
    ``same_volumes`` also stays False for empty lists or mixed results.
    """
    uaS = set(rA)
    ubS = set(rB)
    aS = set([uas.name for uas in uaS])
    bS = set([ubs.name for ubs in ubS])
    hassameLen = False
    hassameSP = False
    hassamevol = False
    hassamevollist = []
    if (len(rA) == len(rB) ):
        hassameLen = True
        # Symmetric difference empty <=> the two name sets are identical.
        if not (len (aS.union(bS) - aS.intersection(bS))):
            hassameSP = True
            if rB and rA:
                rAdict = dict( [ (i.name,i) for i in (rA) ] )
                rBdict = dict( [ (i.name,i) for i in (rB) ] )
                for key,bpath in rBdict.items():
                    # The same name exists in both dicts at this point.
                    apath = rAdict[key]
                    comptA = moose.element(findCompartment(apath))
                    comptB = moose.element(findCompartment(bpath))
                    if not abs(comptA.volume -comptB.volume):
                        hassamevollist.append(True)
                    else:
                        hassamevollist.append(False)
                # All-True or all-False collapses to that single value.
                if len(set(hassamevollist))==1:
                    for x in set(hassamevollist):
                        hassamevol = x
    return ( hassameLen,hassameSP,hassamevol)
def connectObj(reac,spList,spType,comptA,war_msg):
    """Connect each pool of ``spList`` to ``reac`` as substrate or product.

    ``spType`` is the message name ("sub" or "prd").  Pools are looked up by
    name in the module-level ``poolListina`` cache for the pool's own
    compartment.  Returns True when at least the last matching pool was
    connected; False when a matched pool path no longer exists.
    """
    #It should not come here unless the sub/prd is connected to enzyme cplx pool
    allclean = False
    for rsp in spList:
        for akey in list(poolListina[findCompartment(rsp).name]):
            if rsp.name == akey.name:
                if moose.exists(akey.path):
                    moose.connect(moose.element(reac), spType, moose.element(akey), 'reac', 'OneToOne')
                    allclean = True
                else:
                    #It should not come here unless the sub/prd is connected to enzyme cplx pool
                    allclean = False
    return allclean
def checkexist(spList,objB,objA):
    """Check that every element of ``spList`` has a counterpart path in model A.

    Each element's path is translated from model B to model A by name
    substitution and tested with moose.exists.  Returns True only when all
    counterparts exist; an empty or mixed result yields False (matching the
    original accumulator behaviour).
    """
    found_flags = [
        bool(moose.exists(sp.path.replace(objB, objA)))
        for sp in spList
    ]
    if len(set(found_flags)) == 1:
        # Uniform result: all True or all False.
        return found_flags[0]
    return False
def findCompartment(element):
    """Walk up the parent chain of ``element`` until a CubeMesh/CyclMesh compartment is found.

    NOTE(review): assumes such an ancestor exists; otherwise this loops at
    the tree root — confirm callers only pass elements inside a compartment.
    """
    node = element
    while not mooseIsInstance(node, ["CubeMesh", "CyclMesh"]):
        node = node.parent
    return node
def mooseIsInstance(element, classNames):
    """Return True when the resolved moose element's class name is in ``classNames``."""
    resolved = moose.element(element)
    return resolved.__class__.__name__ in classNames
# Command-line usage: python merge.py <source> <destination>
if __name__ == "__main__":
    try:
        sys.argv[1]
    except IndexError:
        print("Source filename or path not given")
        exit(0)
    else:
        src = sys.argv[1]
        if not os.path.exists(src):
            print("Filename or path does not exist",src)
        else:
            try:
                sys.argv[2]
            except IndexError:
                print("Destination filename or path not given")
                exit(0)
            else:
                des = sys.argv[2]
                # Fix: this check previously re-tested ``src`` (already
                # verified above), so a missing destination path was never
                # detected here even though the message printed ``des``.
                if not os.path.exists(des):
                    print("Filename or path does not exist",des)
                    exit(0)
                mergered = mergeChemModel(src,des)
"MOOSE"
] | 7cf8fccedd6519f0fc0434901d1de5ebbae4ab7333977699c17b1a1a888d9ac8 |
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Read the package README for use as the PyPI long description.
readme_filename = os.path.join(os.path.dirname(__file__), 'README.md')
try:
    with open(readme_filename, 'r') as f:
        readme = f.read()
except (IOError, OSError):
    # A missing or unreadable README must not block installation; fall back
    # to an empty description. (Was a bare ``except:``, which also swallowed
    # SystemExit/KeyboardInterrupt.)
    print("Failed to load README file")
    readme = ""
try:
    # Optional: convert Markdown to reStructuredText for nicer PyPI rendering.
    import pypandoc
    readme = pypandoc.convert(readme, to='rst', format='md')
except Exception:
    # pypandoc (or the underlying pandoc binary) being unavailable is an
    # expected condition; keep the raw Markdown text in that case.
    print("Conversion of long_description from markdown to reStructuredText failed, skipping...")
from setuptools import setup
# Guarded so tools can import this setup module without side effects.
if __name__ == '__main__':
    setup(
        name='pepdata',
        version="0.4.0",
        description="Python interface to IEDB and other immune epitope data",
        author="Alex Rubinsteyn",
        author_email="alex {dot} rubinsteyn {at} mssm {dot} edu",
        url="https://github.com/hammerlab/pepdata",
        license="http://www.apache.org/licenses/LICENSE-2.0.html",
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Environment :: Console',
            'Operating System :: OS Independent',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python',
            'Topic :: Scientific/Engineering :: Bio-Informatics',
        ],
        install_requires=[
            'numpy>=1.7',
            'pandas>=0.13.1',
            'scikit-learn>=0.14.1',
            'progressbar',
            'biopython',
            'datacache',
        ],
        # README contents loaded (and possibly converted to reST) above.
        long_description=readme,
        packages=['pepdata'],
        # Ship the bundled CSV datasets inside the installed package.
        package_data = { 'pepdata' : ['data/*csv'] },
        include_package_data = True
    )
| cpcloud/pepdata | setup.py | Python | apache-2.0 | 2,182 | [
"Biopython"
] | 0b5dbbd2d97b3afd07373dc5f18b6bbeac5941542b28204687aeeeca9d23ffbb |
#!/usr/bin/env python3
# INSert membrANE
# A simple, versatile tool for building coarse-grained simulation systems
# Copyright (C) 2017 Tsjerk A. Wassenaar and contributors
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import copy
from collections import namedtuple
import contextlib
import math
import os
import shutil
import sys
import tempfile
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
# GRO file format description. The key is the name of the field, the value is a
# tuple from which the first element is the first (included) and last
# (excluded) indices of the field in the line, and the second element the type
# of the field content.
GRO_FIELDS = {
"resid": ((0, 5), int),
"resname": ((5, 10), str),
"name": ((10, 15), str),
"index": ((15, 20), int),
"x": ((20, 28), float),
"y": ((28, 36), float),
"z": ((36, 44), float),
}
GRO_TEMPLATE = ('{resid:>5}{resname:<5}{name:>5}{index:>5}'
'{x:8.3f}{y:8.3f}{z:8.3f}')
GroDiff = namedtuple('GroDiff', 'linenum line ref_line fields')
class ContextStringIO(StringIO):
"""
StringIO but usable as a context manager.
StringIO can not completely pass as a file, because it is not usable as a
context manager in a 'with' statement. This class adds the context manager
ability to StringIO. It does nothing when the context manager is either
entered or exited, but it can be used in a 'with' statement.
"""
def __enter__(self):
"""
Does nothing when entering a 'with' statement.
"""
pass
def __exit__(self, *args):
"""
Does nothing when exiting a 'with' statement.
"""
pass
@contextlib.contextmanager
def in_directory(dirpath):
return_dir = os.getcwd()
try:
os.chdir(dirpath)
yield dirpath
finally:
os.chdir(return_dir)
@contextlib.contextmanager
def tempdir():
"""
Context manager that moves in a temporary directory.
.. code::
# We are in the initial working directory
with tempdir():
# We are now in a temporary directory
...
# We are back to the initial working directory.
# The temporary does not exist anymore.
"""
dirpath = tempfile.mkdtemp()
try:
with in_directory(dirpath):
yield dirpath
finally:
shutil.rmtree(dirpath)
# realpath and which are copied from MDAnalysis.
# MDAnalysis is released under the GPL v2 license.
# Read the full license at
# <https://github.com/MDAnalysis/mdanalysis/blob/develop/LICENSE>
def realpath(*args):
"""Join all args and return the real path, rooted at /.
Expands '~', '~user', and environment variables such as :envvar`$HOME`.
Returns ``None`` if any of the args is ``None``.
"""
if None in args:
return None
return os.path.realpath(
os.path.expanduser(os.path.expandvars(os.path.join(*args)))
)
def which(program):
"""Determine full path of executable *program* on :envvar:`PATH`.
(Jay at http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python)
"""
def is_exe(fpath):
"""
Returns True is the path points to an executable file.
"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
real_program = realpath(program)
if is_exe(real_program):
return real_program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def read_gro(stream):
    """
    Parse a gro file

    Read an iterable over the lines of a GRO file. Returns the title, the
    atoms, and the box. The atoms are returned as a list of dict, where each
    dict represents an atom. The keys of the atom dicts are

    * 'resid' for the residue number;
    * 'resname' for the residue name;
    * 'index' for the atom index as written in the file;
    * 'name' for the atom name;
    * 'x', 'y', and 'z' for the atom coordinates.

    The box is returned as a list of floats.

    :raises ValueError: if the stream ends before the declared number of
        atoms plus a box line was read, or if extra lines follow the box.

    .. note::

        The function does not read velocities. Also, it does not support
        variable precision.
    """
    # The two first lines are the header. The first line is the title of the
    # structure, the second line is the number of atoms.
    title = next(stream)
    natoms = int(next(stream))
    # Read the atoms according to the format described in GRO_FIELDS.
    # We stop when we reached the number of atoms declared.
    atoms = []
    for atom_count, line in enumerate(stream, start=0):
        if atom_count == natoms:
            # This line is one past the atoms: it is the box line.
            # 'line' is kept for the box parsing below.
            break
        # GRO is a fixed-column format: each field is sliced by the
        # (begin, end) offsets and converted by the callable in GRO_FIELDS.
        atoms.append({key: convert(line[begin:end].strip())
                      for key, ((begin, end), convert) in GRO_FIELDS.items()})
    else:
        # The for/else fires only when the loop exhausted the stream without
        # breaking, i.e. there were fewer lines than natoms + box.
        raise ValueError('Box missing or invalid number of atoms declared.')
    # Read the box.
    box = [float(x) for x in line.split()]
    # Make sure there is nothing after the box.
    try:
        next(stream)
    except StopIteration:
        pass
    else:
        raise ValueError('Extra lines after the box or '
                         'invalid number of atoms declared')
    return title, atoms, box
def compare_gro(stream, ref_stream, tolerance=0.001):
    """
    Compare two gro files with a tolerance on the coordinates

    The `stream` and `ref_stream` arguments are iterable over the lines of two
    GRO files to compare. The tolerance is provided in nanometer so that

    abs(x1 - x2) <= tolerance

    The function returns a list of differences. Each difference is represented
    as a 'GroDiff' named tuple with the following fields:

    * 'linenum': the number of the line where the difference occurs, the line
      count starts at 1 to make the difference easier to find in a text editor;
    * 'line' and 'ref_line': the lines that differ as written in 'stream'
      and in 'ref_stream', respectively;
    * 'fields': the specific fields that differ between the lines.

    The lines in the differences are re-formatted from the information that
    has been parsed. The exact string may differ, especially if velocities
    were provided.

    The fields in the difference list are named after the GRO_FIELDS dict. It
    is `None` if the difference occurs in the title or the number of atoms. It
    is 'box' if for the box.

    If the number of atoms differ between the two files, then the atoms that
    exist in only one of the files are all counted as different and the field
    is set to `None`. If the box also differs, then its line number in the
    report will be wrong for one of the files.
    """
    differences = []
    title, atoms, box = read_gro(stream)
    title_ref, atoms_ref, box_ref = read_gro(ref_stream)
    # Compare the headers: the title is file line 1, the atom count line 2.
    if title != title_ref:
        diff = GroDiff(linenum=1, line=title, ref_line=title_ref, fields=None)
        differences.append(diff)
    if len(atoms) != len(atoms_ref):
        diff = GroDiff(linenum=2,
                       line=str(len(atoms)),
                       ref_line=str(len(atoms_ref)),
                       fields=None)
        differences.append(diff)
    # Compare the atoms. Atom lines start at file line 3; zip_longest pads
    # the shorter list with empty dicts so extra atoms are still reported.
    atom_iter = enumerate(
        zip_longest(atoms, atoms_ref, fillvalue={}),
        start=3
    )
    for linenum, (atom, atom_ref) in atom_iter:
        if atom and atom_ref:
            # Both the atom and the reference atoms are defined.
            # We compare the fields.
            diff_fields = []
            for gro_field, (_, gro_type) in GRO_FIELDS.items():
                if gro_type == float:
                    # The field is a float, we compare with a tolerance.
                    error = math.fabs(atom[gro_field] - atom_ref[gro_field])
                    if error > tolerance:
                        diff_fields.append(gro_field)
                else:
                    # The field is an int or a string, we check equality.
                    if atom[gro_field] != atom_ref[gro_field]:
                        diff_fields.append(gro_field)
        else:
            # At least one of the atoms is not defined. They are counted as
            # different anyway, no need to compare anything.
            # NOTE(review): 'diff_fields' is NOT reset here, so this appends
            # to the list left over from the previous iteration (the same
            # list object already stored in the last GroDiff), and raises
            # NameError if this branch is hit on the very first iteration
            # (one file has zero atoms) -- confirm intended behavior.
            diff_fields.append(None)
        if diff_fields:
            # We found a difference, add it to the list.
            line = ref_line = ''
            if atom:
                line = GRO_TEMPLATE.format(**atom)
            if atom_ref:
                ref_line = GRO_TEMPLATE.format(**atom_ref)
            diff = GroDiff(linenum=linenum,
                           line=line,
                           ref_line=ref_line,
                           fields=diff_fields)
            differences.append(diff)
    # Compare the box.
    # NOTE(review): 'linenum' is the loop variable above; if both files
    # declare zero atoms this is unbound and the next line raises NameError.
    if box != box_ref:
        diff = GroDiff(linenum=linenum + 1,
                       line=' '.join(map(str, box)),
                       ref_line=' '.join(map(str, box_ref)),
                       fields='box')
        differences.append(diff)
    return differences
def format_gro_diff(differences, outstream=sys.stdout, max_atoms=10):
    """
    Format differences between GRO files in a human readable way.

    :param differences: list of 'GroDiff' named tuples as produced by
        :func:`compare_gro` (line numbers are 1-based).
    :param outstream: file-like object the report is written to.
    :param max_atoms: maximum number of differing atom lines to display.

    Nothing is written when *differences* is empty.
    """
    if not differences:
        # Do not display anything if the two gro files are identical.
        return
    # We do not want to modify the input
    differences = copy.copy(differences)
    # Display the differences in metadata first.
    # compare_gro numbers lines from 1: the title is on line 1 and the atom
    # count on line 2. (The previous checks against 0 and 1 could never
    # match compare_gro's output, so a title difference was mislabelled as
    # an atom-count difference.)
    if differences and differences[0].linenum == 1:
        diff = differences.pop(0)
        print('The title is different:', file=outstream)
        print(diff.line, file=outstream)
        print(diff.ref_line, file=outstream)
    if differences and differences[0].linenum == 2:
        diff = differences.pop(0)
        print('The number of atoms is different! '
              '"{}" instead of "{}".'.format(diff.line, diff.ref_line),
              file=outstream)
    if differences and differences[-1].fields == 'box':
        diff = differences.pop(-1)
        print('The box is different:', file=outstream)
        print(diff.line, file=outstream)
        print(diff.ref_line, file=outstream)
    # Then display the atoms. Only display 'max_atoms' ones.
    if len(differences) > max_atoms:
        print('There are {} atoms that differ. '
              'Only displaying the {} first ones.'
              .format(len(differences), max_atoms),
              file=outstream)
    for diff in differences[:max_atoms]:
        print('On line {}:'.format(diff.linenum), file=outstream)
        print(diff.line, file=outstream)
        print(diff.ref_line, file=outstream)
def assert_gro_equal(path, ref_path):
    """
    Raise an AssertionError if two GRO files are not semantically identical.

    The assertion message contains a human-readable report of every
    difference found.
    """
    report = StringIO()
    with open(path) as stream, open(ref_path) as ref_stream:
        found = compare_gro(stream, ref_stream)
        format_gro_diff(found, outstream=report)
    assert not found, '\n' + report.getvalue()
def _open_if_needed(handle):
    """
    Return handle if it is a ContextStringIO instance else try to open it.
    """
    # ContextStringIO instances are already usable in a 'with' block;
    # anything else is treated as a path to open.
    return handle if isinstance(handle, ContextStringIO) else open(handle)
@contextlib.contextmanager
def _redirect_out_and_err(stdout, stderr):
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
sys.stdout = stdout
sys.stderr = stderr
yield
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
| Tsjerk/Insane | tests/utils.py | Python | gpl-2.0 | 12,460 | [
"MDAnalysis"
] | 905a0dd31bc13a0e32969c491ed11f62c254921c2ae46964e660baa587c2a9d3 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import hashlib
import inspect
import logging
import math
import pickle as pkl
import re
import uuid
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from functools import reduce
from itertools import product
from typing import Any, Dict, List, Optional
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from markdown import markdown
from pandas.tseries.frequencies import to_offset
from superset import app, cache, get_css_manifest_files
from superset.exceptions import NullValueException, SpatialException
from superset.utils import core as utils
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
to_adhoc,
)
# Shorthand handles into the Flask application configuration.
config = app.config
stats_logger = config["STATS_LOGGER"]
# Default relative time bounds applied when a chart does not set its own;
# the exact values come from the application config.
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]

# Form-data keys that may carry metric definitions; scanned by
# BaseViz.process_metrics to collect every metric used by a chart.
METRIC_KEYS = [
    "metric",
    "metrics",
    "percent_metrics",
    "metric_2",
    "secondary_metric",
    "x",
    "y",
    "size",
]
class BaseViz(object):

    """All visualizations derive this base class"""

    viz_type: Optional[str] = None
    verbose_name = "Base Viz"
    credits = ""
    is_timeseries = False
    cache_type = "df"
    # When True, object-dtype metric columns are coerced to numbers in get_df.
    enforce_numerical_metrics = True

    def __init__(self, datasource, form_data, force=False):
        """Initialize common visualization state.

        :param datasource: connected datasource object the chart queries
        :param form_data: dict of chart controls coming from the frontend
        :param force: when True, bypass the cache and query the source
        :raises Exception: when no datasource is provided
        """
        if not datasource:
            raise Exception(_("Viz is missing a datasource"))

        self.datasource = datasource
        self.request = request
        # Instance attribute intentionally shadows the class-level viz_type:
        # the effective type is whatever the form data says.
        self.viz_type = form_data.get("viz_type")
        self.form_data = form_data

        self.query = ""
        self.token = self.form_data.get("token", "token_" + uuid.uuid4().hex[:8])

        self.groupby = self.form_data.get("groupby") or []
        self.time_shift = timedelta()

        self.status = None
        self.error_msg = ""
        self.results = None
        self.error_message = None
        self.force = force

        # Keeping track of whether some data came from cache
        # this is useful to trigger the <CachedLabel /> when
        # in the cases where visualization have many queries
        # (FilterBox for instance)
        self._some_from_cache = False
        self._any_cache_key = None
        self._any_cached_dttm = None
        self._extra_chart_data = []

        self.process_metrics()

    def process_metrics(self):
        """Collect every metric referenced in the form data.

        Populates self.metric_dict, self.all_metrics and self.metric_labels.
        """
        # metrics in TableViz is order sensitive, so metric_dict should be
        # OrderedDict
        self.metric_dict = OrderedDict()
        fd = self.form_data
        for mkey in METRIC_KEYS:
            val = fd.get(mkey)
            if val:
                if not isinstance(val, list):
                    val = [val]
                for o in val:
                    label = utils.get_metric_name(o)
                    self.metric_dict[label] = o

        # Cast to list needed to return serializable object in py3
        self.all_metrics = list(self.metric_dict.values())
        self.metric_labels = list(self.metric_dict.keys())

    @staticmethod
    def handle_js_int_overflow(data):
        """Stringify ints too large for JavaScript's number type."""
        for d in data.get("records", dict()):
            for k, v in list(d.items()):
                if isinstance(v, int):
                    # if an int is too big for Java Script to handle
                    # convert it to a string
                    if abs(v) > JS_MAX_INTEGER:
                        d[k] = str(v)
        return data

    def run_extra_queries(self):
        """Lifecycle method to use when more than one query is needed

        In rare-ish cases, a visualization may need to execute multiple
        queries. That is the case for FilterBox or for time comparison
        in Line chart for instance.

        In those cases, we need to make sure these queries run before the
        main `get_payload` method gets called, so that the overall caching
        metadata can be right. The way it works here is that if any of
        the previous `get_df_payload` calls hit the cache, the main
        payload's metadata will reflect that.

        The multi-query support may need more work to become a first class
        use case in the framework, and for the UI to reflect the subtleties
        (show that only some of the queries were served from cache for
        instance). In the meantime, since multi-query is rare, we treat
        it with a bit of a hack. Note that the hack became necessary
        when moving from caching the visualization's data itself, to caching
        the underlying query(ies).
        """
        pass

    def get_samples(self):
        """Return up to 1000 raw rows from the datasource as dicts."""
        query_obj = self.query_obj()
        query_obj.update(
            {
                "groupby": [],
                "metrics": [],
                "row_limit": 1000,
                "columns": [o.column_name for o in self.datasource.columns],
            }
        )
        df = self.get_df(query_obj)
        return df.to_dict(orient="records")

    def get_df(
        self, query_obj: Optional[Dict[str, Any]] = None
    ) -> Optional[pd.DataFrame]:
        """Returns a pandas dataframe based on the query object"""
        if not query_obj:
            query_obj = self.query_obj()
        if not query_obj:
            return None

        self.error_msg = ""

        timestamp_format = None
        if self.datasource.type == "table":
            dttm_col = self.datasource.get_col(query_obj["granularity"])
            if dttm_col:
                timestamp_format = dttm_col.python_date_format

        # The datasource here can be different backend but the interface is common
        self.results = self.datasource.query(query_obj)
        self.query = self.results.query
        self.status = self.results.status
        self.error_message = self.results.error_message

        df = self.results.df
        # Transform the timestamp we received from database to pandas supported
        # datetime format. If no python_date_format is specified, the pattern will
        # be considered as the default ISO date format
        # If the datetime format is unix, the parse will use the corresponding
        # parsing logic.
        if df is not None and not df.empty:
            if DTTM_ALIAS in df.columns:
                if timestamp_format in ("epoch_s", "epoch_ms"):
                    # Column has already been formatted as a timestamp.
                    dttm_col = df[DTTM_ALIAS]
                    one_ts_val = dttm_col[0]

                    # convert time column to pandas Timestamp, but different
                    # ways to convert depending on string or int types
                    try:
                        int(one_ts_val)
                        is_integral = True
                    except (ValueError, TypeError):
                        is_integral = False
                    if is_integral:
                        unit = "s" if timestamp_format == "epoch_s" else "ms"
                        df[DTTM_ALIAS] = pd.to_datetime(
                            dttm_col, utc=False, unit=unit, origin="unix"
                        )
                    else:
                        df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
                else:
                    df[DTTM_ALIAS] = pd.to_datetime(
                        df[DTTM_ALIAS], utc=False, format=timestamp_format
                    )
                if self.datasource.offset:
                    df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
                df[DTTM_ALIAS] += self.time_shift

            if self.enforce_numerical_metrics:
                self.df_metrics_to_num(df)

            df.replace([np.inf, -np.inf], np.nan, inplace=True)
        return df

    def df_metrics_to_num(self, df):
        """Converting metrics to numeric when pandas.read_sql cannot"""
        metrics = self.metric_labels
        for col, dtype in df.dtypes.items():
            if dtype.type == np.object_ and col in metrics:
                df[col] = pd.to_numeric(df[col], errors="coerce")

    def process_query_filters(self):
        """Normalize legacy/extra filters in the form data in place."""
        utils.convert_legacy_filters_into_adhoc(self.form_data)
        merge_extra_filters(self.form_data)
        utils.split_adhoc_filters_into_base_filters(self.form_data)

    def query_obj(self):
        """Building a query object"""
        form_data = self.form_data
        self.process_query_filters()
        gb = form_data.get("groupby") or []
        metrics = self.all_metrics or []
        columns = form_data.get("columns") or []
        # Deduplicate while preserving order: group-bys first, then columns.
        groupby = []
        for o in gb + columns:
            if o not in groupby:
                groupby.append(o)

        is_timeseries = self.is_timeseries
        if DTTM_ALIAS in groupby:
            groupby.remove(DTTM_ALIAS)
            is_timeseries = True

        granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
        limit = int(form_data.get("limit") or 0)
        timeseries_limit_metric = form_data.get("timeseries_limit_metric")
        row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])

        # default order direction
        order_desc = form_data.get("order_desc", True)

        since, until = utils.get_since_until(
            relative_start=relative_start,
            relative_end=relative_end,
            time_range=form_data.get("time_range"),
            since=form_data.get("since"),
            until=form_data.get("until"),
        )
        time_shift = form_data.get("time_shift", "")
        self.time_shift = utils.parse_past_timedelta(time_shift)
        from_dttm = None if since is None else (since - self.time_shift)
        to_dttm = None if until is None else (until - self.time_shift)
        if from_dttm and to_dttm and from_dttm > to_dttm:
            raise Exception(_("From date cannot be larger than to date"))

        self.from_dttm = from_dttm
        self.to_dttm = to_dttm

        # extras are used to query elements specific to a datasource type
        # for instance the extra where clause that applies only to Tables
        extras = {
            "druid_time_origin": form_data.get("druid_time_origin", ""),
            "having": form_data.get("having", ""),
            "having_druid": form_data.get("having_filters", []),
            "time_grain_sqla": form_data.get("time_grain_sqla", ""),
            "time_range_endpoints": form_data.get("time_range_endpoints"),
            "where": form_data.get("where", ""),
        }

        d = {
            "granularity": granularity,
            "from_dttm": from_dttm,
            "to_dttm": to_dttm,
            "is_timeseries": is_timeseries,
            "groupby": groupby,
            "metrics": metrics,
            "row_limit": row_limit,
            "filter": self.form_data.get("filters", []),
            "timeseries_limit": limit,
            "extras": extras,
            "timeseries_limit_metric": timeseries_limit_metric,
            "order_desc": order_desc,
        }
        return d

    @property
    def cache_timeout(self):
        """Cache timeout in seconds, resolved in order of specificity:
        chart form data, then datasource, then database, then the
        application-wide default.
        """
        if self.form_data.get("cache_timeout") is not None:
            return int(self.form_data.get("cache_timeout"))
        if self.datasource.cache_timeout is not None:
            return self.datasource.cache_timeout
        # Fixed: the previous code wrote `(hasattr(...) and ...cache_timeout)
        # is not None`, comparing a boolean expression to None -- that is
        # always True, so datasources without a `database` attribute raised
        # AttributeError instead of falling back to the config default.
        if (
            hasattr(self.datasource, "database")
            and self.datasource.database.cache_timeout is not None
        ):
            return self.datasource.database.cache_timeout
        return config["CACHE_DEFAULT_TIMEOUT"]

    def get_json(self):
        """Return the full payload serialized as JSON."""
        return json.dumps(
            self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
        )

    def cache_key(self, query_obj, **extra):
        """
        The cache key is made out of the key/values in `query_obj`, plus any
        other key/values in `extra`.

        We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
        "5 days ago" or "now").

        The `extra` arguments are currently used by time shift queries, since
        different time shifts will differ only in the `from_dttm` and `to_dttm`
        values which are stripped.
        """
        cache_dict = copy.copy(query_obj)
        cache_dict.update(extra)

        # Strip resolved datetime bounds so relative ranges cache stably.
        for k in ["from_dttm", "to_dttm"]:
            del cache_dict[k]

        cache_dict["time_range"] = self.form_data.get("time_range")
        cache_dict["datasource"] = self.datasource.uid
        cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
        json_data = self.json_dumps(cache_dict, sort_keys=True)
        return hashlib.md5(json_data.encode("utf-8")).hexdigest()

    def get_payload(self, query_obj=None):
        """Returns a payload of metadata and data"""
        self.run_extra_queries()
        payload = self.get_df_payload(query_obj)

        df = payload.get("df")
        if self.status != utils.QueryStatus.FAILED:
            if df is not None and df.empty:
                payload["error"] = "No data"
            else:
                payload["data"] = self.get_data(df)
        if "df" in payload:
            del payload["df"]
        return payload

    def get_df_payload(self, query_obj=None, **kwargs):
        """Handles caching around the df payload retrieval"""
        if not query_obj:
            query_obj = self.query_obj()
        cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
        logging.info("Cache key: {}".format(cache_key))
        is_loaded = False
        stacktrace = None
        df = None
        cached_dttm = datetime.utcnow().isoformat().split(".")[0]
        if cache_key and cache and not self.force:
            cache_value = cache.get(cache_key)
            if cache_value:
                stats_logger.incr("loaded_from_cache")
                try:
                    cache_value = pkl.loads(cache_value)
                    df = cache_value["df"]
                    self.query = cache_value["query"]
                    self._any_cached_dttm = cache_value["dttm"]
                    self._any_cache_key = cache_key
                    self.status = utils.QueryStatus.SUCCESS
                    is_loaded = True
                except Exception as e:
                    # A corrupt cache entry should not break the chart;
                    # fall through and re-query the source.
                    logging.exception(e)
                    logging.error(
                        "Error reading cache: " + utils.error_msg_from_exception(e)
                    )
                logging.info("Serving from cache")

        if query_obj and not is_loaded:
            try:
                df = self.get_df(query_obj)
                if self.status != utils.QueryStatus.FAILED:
                    stats_logger.incr("loaded_from_source")
                    is_loaded = True
            except Exception as e:
                logging.exception(e)
                if not self.error_message:
                    self.error_message = "{}".format(e)
                self.status = utils.QueryStatus.FAILED
                stacktrace = utils.get_stacktrace()

            if (
                is_loaded
                and cache_key
                and cache
                and self.status != utils.QueryStatus.FAILED
            ):
                try:
                    cache_value = dict(
                        dttm=cached_dttm,
                        df=df if df is not None else None,
                        query=self.query,
                    )
                    cache_value = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)

                    logging.info(
                        "Caching {} chars at key {}".format(len(cache_value), cache_key)
                    )

                    stats_logger.incr("set_cache_key")
                    cache.set(cache_key, cache_value, timeout=self.cache_timeout)
                except Exception as e:
                    # cache.set call can fail if the backend is down or if
                    # the key is too large or whatever other reasons
                    logging.warning("Could not cache key {}".format(cache_key))
                    logging.exception(e)
                    cache.delete(cache_key)
        return {
            "cache_key": self._any_cache_key,
            "cached_dttm": self._any_cached_dttm,
            "cache_timeout": self.cache_timeout,
            "df": df,
            "error": self.error_message,
            "form_data": self.form_data,
            "is_cached": self._any_cache_key is not None,
            "query": self.query,
            "status": self.status,
            "stacktrace": stacktrace,
            "rowcount": len(df.index) if df is not None else 0,
        }

    def json_dumps(self, obj, sort_keys=False):
        """Serialize with int-epoch datetimes and NaN-safe output."""
        return json.dumps(
            obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
        )

    def payload_json_and_has_error(self, payload):
        """Return (serialized payload, whether it carries an error)."""
        has_error = (
            payload.get("status") == utils.QueryStatus.FAILED
            or payload.get("error") is not None
        )
        return self.json_dumps(payload), has_error

    @property
    def data(self):
        """This is the data object serialized to the js layer"""
        content = {
            "form_data": self.form_data,
            "token": self.token,
            "viz_name": self.viz_type,
            "filter_select_enabled": self.datasource.filter_select_enabled,
        }
        return content

    def get_csv(self):
        """Return the chart's dataframe rendered as CSV text."""
        df = self.get_df()
        include_index = not isinstance(df.index, pd.RangeIndex)
        return df.to_csv(index=include_index, **config["CSV_EXPORT"])

    def get_data(self, df):
        """Default data shape: one dict per row; subclasses override."""
        return df.to_dict(orient="records")

    @property
    def json_data(self):
        return json.dumps(self.data)
class TableViz(BaseViz):

    """A basic html table that is sortable and searchable"""

    viz_type = "table"
    verbose_name = _("Table View")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False
    # Raw columns may legitimately be non-numeric, so no coercion here.
    enforce_numerical_metrics = False

    def should_be_timeseries(self):
        """Return whether the time column should be part of the table.

        Raises when 'Include Time' is checked but no usable time
        granularity is configured.
        """
        fd = self.form_data
        # TODO handle datasource-type-specific code in datasource
        conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
            fd.get("granularity_sqla") and fd.get("time_grain_sqla")
        )
        if fd.get("include_time") and not conditions_met:
            raise Exception(
                _("Pick a granularity in the Time section or " "uncheck 'Include Time'")
            )
        return fd.get("include_time")

    def query_obj(self):
        """Build the query, supporting either raw columns or group-by mode.

        Raises when both [Columns] and [Group By]/[Metrics] are set, as the
        two modes are mutually exclusive.
        """
        d = super().query_obj()
        fd = self.form_data

        if fd.get("all_columns") and (fd.get("groupby") or fd.get("metrics")):
            raise Exception(
                _(
                    "Choose either fields to [Group By] and [Metrics] or "
                    "[Columns], not both"
                )
            )

        sort_by = fd.get("timeseries_limit_metric")
        if fd.get("all_columns"):
            # Raw-columns mode: no aggregation, explicit ordering columns.
            d["columns"] = fd.get("all_columns")
            d["groupby"] = []
            order_by_cols = fd.get("order_by_cols") or []
            d["orderby"] = [json.loads(t) for t in order_by_cols]
        elif sort_by:
            # Make sure the sort-by metric is part of the query.
            sort_by_label = utils.get_metric_name(sort_by)
            if sort_by_label not in utils.get_metric_names(d["metrics"]):
                d["metrics"] += [sort_by]
            d["orderby"] = [(sort_by, not fd.get("order_desc", True))]

        # Add all percent metrics that are not already in the list
        if "percent_metrics" in fd:
            d["metrics"] = d["metrics"] + list(
                filter(lambda m: m not in d["metrics"], fd["percent_metrics"] or [])
            )

        d["is_timeseries"] = self.should_be_timeseries()
        return d

    def get_data(self, df):
        """Post-process the dataframe: compute percent metrics and emit
        JS-safe records plus the column list.
        """
        fd = self.form_data
        if not self.should_be_timeseries() and df is not None and DTTM_ALIAS in df:
            del df[DTTM_ALIAS]

        # Sum up and compute percentages for all percent metrics
        percent_metrics = fd.get("percent_metrics") or []
        percent_metrics = [utils.get_metric_name(m) for m in percent_metrics]

        if len(percent_metrics):
            percent_metrics = list(filter(lambda m: m in df, percent_metrics))
            metric_sums = {
                m: reduce(lambda a, b: a + b, df[m]) for m in percent_metrics
            }
            # Each percent value is the row's share of the column total;
            # None when the column sums to zero (avoids division by zero).
            metric_percents = {
                m: list(
                    map(
                        lambda a: None if metric_sums[m] == 0 else a / metric_sums[m],
                        df[m],
                    )
                )
                for m in percent_metrics
            }
            for m in percent_metrics:
                m_name = "%" + m
                df[m_name] = pd.Series(metric_percents[m], name=m_name)
            # Remove metrics that are not in the main metrics list
            metrics = fd.get("metrics") or []
            metrics = [utils.get_metric_name(m) for m in metrics]
            for m in filter(
                lambda m: m not in metrics and m in df.columns, percent_metrics
            ):
                del df[m]

        data = self.handle_js_int_overflow(
            dict(records=df.to_dict(orient="records"), columns=list(df.columns))
        )

        return data

    def json_dumps(self, obj, sort_keys=False):
        """Serialize with ISO datetimes (unlike the base class's int epochs)."""
        return json.dumps(
            obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
        )
class TimeTableViz(BaseViz):

    """A data table with rich time-series related columns"""

    viz_type = "time_table"
    verbose_name = _("Time Table View")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = True

    def query_obj(self):
        """Validate metric/group-by combinations before querying.

        Raises when no metric is selected, or when a group-by is combined
        with more than one metric.
        """
        d = super().query_obj()
        fd = self.form_data

        if not fd.get("metrics"):
            raise Exception(_("Pick at least one metric"))

        if fd.get("groupby") and len(fd.get("metrics")) > 1:
            raise Exception(
                _("When using 'Group By' you are limited to use a single metric")
            )
        return d

    def get_data(self, df):
        """Pivot on the time column and return per-timestamp records."""
        fd = self.form_data
        columns = None
        values = self.metric_labels
        if fd.get("groupby"):
            # With a group-by only one metric is allowed (see query_obj),
            # so the single metric becomes the pivot value.
            values = self.metric_labels[0]
            columns = fd.get("groupby")
        pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
        pt.index = pt.index.map(str)
        pt = pt.sort_index()
        return dict(
            records=pt.to_dict(orient="index"),
            columns=list(pt.columns),
            # NOTE(review): assumes 'groupby' is present (a list, possibly
            # empty) in the form data; len(None) would raise -- confirm.
            is_group_by=len(fd.get("groupby")) > 0,
        )
class PivotTableViz(BaseViz):

    """A pivot table view, define your rows, columns and metrics"""

    viz_type = "pivot_table"
    verbose_name = _("Pivot Table")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def query_obj(self):
        """Validate the pivot controls and build the query object.

        Raises when no group-by or metric is selected, when transposing
        without columns, or when group-by and columns overlap.
        """
        d = super().query_obj()
        groupby = self.form_data.get("groupby")
        columns = self.form_data.get("columns")
        metrics = self.form_data.get("metrics")
        transpose = self.form_data.get("transpose_pivot")
        if not columns:
            columns = []
        if not groupby:
            groupby = []
        if not groupby:
            raise Exception(_("Please choose at least one 'Group by' field "))
        if transpose and not columns:
            raise Exception(
                _(
                    (
                        "Please choose at least one 'Columns' field when "
                        "select 'Transpose Pivot' option"
                    )
                )
            )
        if not metrics:
            raise Exception(_("Please choose at least one metric"))
        if any(v in groupby for v in columns) or any(v in columns for v in groupby):
            # Message previously read "Group By' ..." with the opening
            # quote missing; both field names are now quoted consistently.
            raise Exception(_("'Group By' and 'Columns' can't overlap"))
        return d

    def get_data(self, df):
        """Pivot the dataframe and render it as a styled HTML table."""
        if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
            del df[DTTM_ALIAS]

        aggfunc = self.form_data.get("pandas_aggfunc") or "sum"

        # Ensure that Pandas's sum function mimics that of SQL.
        if aggfunc == "sum":
            aggfunc = lambda x: x.sum(min_count=1)

        groupby = self.form_data.get("groupby")
        columns = self.form_data.get("columns")
        if self.form_data.get("transpose_pivot"):
            groupby, columns = columns, groupby
        df = df.pivot_table(
            index=groupby,
            columns=columns,
            values=[utils.get_metric_name(m) for m in self.form_data.get("metrics")],
            aggfunc=aggfunc,
            margins=self.form_data.get("pivot_margins"),
        )
        # Display metrics side by side with each column
        if self.form_data.get("combine_metric"):
            df = df.stack(0).unstack()
        return dict(
            columns=list(df.columns),
            html=df.to_html(
                na_rep="null",
                classes=(
                    "dataframe table table-striped table-bordered "
                    "table-condensed table-hover"
                ).split(" "),
            ),
        )
class MarkupViz(BaseViz):

    """Use html or markdown to create a free form widget"""

    viz_type = "markup"
    verbose_name = _("Markup")
    is_timeseries = False

    def query_obj(self):
        # Markup widgets render static content and never hit a datasource.
        return None

    def get_df(
        self, query_obj: Optional[Dict[str, Any]] = None
    ) -> Optional[pd.DataFrame]:
        # No dataframe backs this visualization.
        return None

    def get_data(self, df):
        """Return the widget's HTML, rendering markdown when requested."""
        markup_type = self.form_data.get("markup_type")
        code = self.form_data.get("code", "")
        if markup_type == "markdown":
            code = markdown(code)
        return dict(html=code, theme_css=get_css_manifest_files("theme"))
class SeparatorViz(MarkupViz):

    """Use to create section headers in a dashboard, similar to `Markup`"""

    # Inherits all rendering behavior from MarkupViz; only the identity
    # shown in the UI differs.
    viz_type = "separator"
    verbose_name = _("Separator")
class WordCloudViz(BaseViz):

    """Build a colorful word cloud

    Uses the nice library at:
    https://github.com/jasondavies/d3-cloud
    """

    viz_type = "word_cloud"
    verbose_name = _("Word Cloud")
    is_timeseries = False

    def query_obj(self):
        """Group exclusively by the configured 'series' column."""
        d = super().query_obj()
        d["groupby"] = [self.form_data.get("series")]
        return d
class TreemapViz(BaseViz):

    """Tree map visualisation for hierarchical data."""

    viz_type = "treemap"
    verbose_name = _("Treemap")
    credits = '<a href="https://d3js.org">d3.js</a>'
    is_timeseries = False

    def _nest(self, metric, df):
        """Recursively turn a (multi-)indexed dataframe into the nested
        name/children/value structure d3's treemap expects.
        """
        nlevels = df.index.nlevels
        if nlevels == 1:
            # Leaf level: emit name/value pairs.
            result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
        else:
            # Recurse into each slice of the outermost index level.
            result = [
                {"name": l, "children": self._nest(metric, df.loc[l])}
                for l in df.index.levels[0]
            ]
        return result

    def get_data(self, df):
        """Build one tree per metric, rooted at the metric name."""
        df = df.set_index(self.form_data.get("groupby"))
        chart_data = [
            {"name": metric, "children": self._nest(metric, df)}
            for metric in df.columns
        ]
        return chart_data
class CalHeatmapViz(BaseViz):

    """Calendar heatmap."""

    viz_type = "cal_heatmap"
    verbose_name = _("Calendar Heatmap")
    credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
    is_timeseries = True

    def get_data(self, df):
        """Shape the data as {metric: {epoch_seconds: value}} plus the
        domain/range parameters the cal-heatmap frontend expects.
        """
        form_data = self.form_data

        data = {}
        records = df.to_dict("records")
        for metric in self.metric_labels:
            values = {}
            for obj in records:
                v = obj[DTTM_ALIAS]
                if hasattr(v, "value"):
                    # presumably a pandas Timestamp whose .value is
                    # nanoseconds since epoch -- hence the 10**9 below;
                    # TODO confirm for other time types.
                    v = v.value
                values[str(v / 10 ** 9)] = obj.get(metric)
            data[metric] = values

        start, end = utils.get_since_until(
            relative_start=relative_start,
            relative_end=relative_end,
            time_range=form_data.get("time_range"),
            since=form_data.get("since"),
            until=form_data.get("until"),
        )
        if not start or not end:
            raise Exception("Please provide both time bounds (Since and Until)")
        domain = form_data.get("domain_granularity")
        diff_delta = rdelta.relativedelta(end, start)
        diff_secs = (end - start).total_seconds()

        # Number of domain units spanned by the time range (inclusive).
        # NOTE(review): the day/hour branches use float division results
        # ('//' on a float keeps float), so 'range' may be e.g. 8.0 -- the
        # frontend apparently tolerates this; confirm.
        if domain == "year":
            range_ = diff_delta.years + 1
        elif domain == "month":
            range_ = diff_delta.years * 12 + diff_delta.months + 1
        elif domain == "week":
            range_ = diff_delta.years * 53 + diff_delta.weeks + 1
        elif domain == "day":
            range_ = diff_secs // (24 * 60 * 60) + 1
        else:
            range_ = diff_secs // (60 * 60) + 1

        return {
            "data": data,
            "start": start,
            "domain": domain,
            "subdomain": form_data.get("subdomain_granularity"),
            "range": range_,
        }

    def query_obj(self):
        """Query all configured metrics over the time range."""
        d = super().query_obj()
        fd = self.form_data
        d["metrics"] = fd.get("metrics")
        return d
class NVD3Viz(BaseViz):

    """Base class for all nvd3 vizs"""

    # Shared credits/flags for every NVD3-rendered chart; concrete
    # subclasses override viz_type and verbose_name.
    credits = '<a href="http://nvd3.org/">NVD3.org</a>'
    viz_type: Optional[str] = None
    verbose_name = "Base NVD3 Viz"
    is_timeseries = False
class BoxPlotViz(NVD3Viz):

    """Box plot viz from ND3"""

    viz_type = "box_plot"
    verbose_name = _("Box Plot")
    sort_series = False
    is_timeseries = True

    def to_series(self, df, classed="", title_suffix=""):
        """Convert the aggregated dataframe into NVD3 box-plot series.

        Each row of *df* (indexed by group) yields one box per metric,
        holding the aggregate statistics computed in get_data. 'classed'
        and 'title_suffix' are accepted but unused here.
        """
        label_sep = " - "
        chart_data = []
        for index_value, row in zip(df.index, df.to_dict(orient="records")):
            if isinstance(index_value, tuple):
                index_value = label_sep.join(index_value)

            boxes = defaultdict(dict)

            # Columns are (metric, statistic) pairs after the groupby/agg.
            for (label, key), value in row.items():
                if key == "nanmedian":
                    # NVD3 expects the median under the name Q2.
                    key = "Q2"
                boxes[label][key] = value

            for label, box in boxes.items():
                if len(self.form_data.get("metrics")) > 1:
                    # need to render data labels with metrics
                    chart_label = label_sep.join([index_value, label])
                else:
                    chart_label = index_value
                chart_data.append({"label": chart_label, "values": box})
        return chart_data

    def get_data(self, df):
        """Aggregate per group and compute box statistics per metric."""
        form_data = self.form_data

        # conform to NVD3 names
        def Q1(series):  # need to be named functions - can't use lambdas
            return np.nanpercentile(series, 25)

        def Q3(series):
            return np.nanpercentile(series, 75)

        whisker_type = form_data.get("whisker_options")
        # NOTE(review): a missing/None whisker_type raises TypeError on the
        # substring test below before ever reaching the ValueError branch --
        # confirm the frontend always sends a value.
        if whisker_type == "Tukey":

            def whisker_high(series):
                upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
                return series[series <= upper_outer_lim].max()

            def whisker_low(series):
                lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
                return series[series >= lower_outer_lim].min()

        elif whisker_type == "Min/max (no outliers)":

            def whisker_high(series):
                return series.max()

            def whisker_low(series):
                return series.min()

        elif " percentiles" in whisker_type:
            # e.g. "5/95 percentiles" -> low=5, high=95
            low, high = whisker_type.replace(" percentiles", "").split("/")

            def whisker_high(series):
                return np.nanpercentile(series, int(high))

            def whisker_low(series):
                return np.nanpercentile(series, int(low))

        else:
            raise ValueError("Unknown whisker type: {}".format(whisker_type))

        def outliers(series):
            above = series[series > whisker_high(series)]
            below = series[series < whisker_low(series)]
            # pandas sometimes doesn't like getting lists back here
            return set(above.tolist() + below.tolist())

        aggregate = [Q1, np.nanmedian, Q3, whisker_high, whisker_low, outliers]
        df = df.groupby(form_data.get("groupby")).agg(aggregate)
        chart_data = self.to_series(df)
        return chart_data
class BubbleViz(NVD3Viz):
    """Based on the NVD3 bubble chart"""

    viz_type = "bubble"
    verbose_name = _("Bubble Chart")
    is_timeseries = False

    def query_obj(self):
        """Build the query, mapping the x/y/size form fields onto metrics."""
        form_data = self.form_data
        d = super().query_obj()
        d["groupby"] = [form_data.get("entity")]
        series_col = form_data.get("series")
        if series_col:
            d["groupby"].append(series_col)
        self.x_metric = form_data.get("x")
        self.y_metric = form_data.get("y")
        self.z_metric = form_data.get("size")
        self.entity = form_data.get("entity")
        self.series = series_col or self.entity
        d["row_limit"] = form_data.get("limit")
        d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
        if len(set(self.metric_labels)) < 3:
            raise Exception(_("Please use 3 different metric labels"))
        if not all(d["metrics"] + [self.entity]):
            raise Exception(_("Pick a metric for x, y and size"))
        return d

    def get_data(self, df):
        """Bucket rows by their series value into NVD3 bubble payloads."""
        df["x"] = df[[utils.get_metric_name(self.x_metric)]]
        df["y"] = df[[utils.get_metric_name(self.y_metric)]]
        df["size"] = df[[utils.get_metric_name(self.z_metric)]]
        df["shape"] = "circle"
        df["group"] = df[[self.series]]
        grouped = defaultdict(list)
        for record in df.to_dict(orient="records"):
            grouped[record["group"]].append(record)
        return [{"key": key, "values": rows} for key, rows in grouped.items()]
class BulletViz(NVD3Viz):
    """Based on the NVD3 bullet chart"""

    viz_type = "bullet"
    verbose_name = _("Bullet Chart")
    is_timeseries = False

    def query_obj(self):
        """Parse comma-separated range/marker fields and pin the metric."""
        form_data = self.form_data
        d = super().query_obj()
        self.metric = form_data.get("metric")

        def as_strings(field):
            raw = form_data.get(field)
            return raw.split(",") if raw else []

        def as_floats(field):
            return [float(item) for item in as_strings(field)]

        self.ranges = as_floats("ranges")
        self.range_labels = as_strings("range_labels")
        self.markers = as_floats("markers")
        self.marker_labels = as_strings("marker_labels")
        self.marker_lines = as_floats("marker_lines")
        self.marker_line_labels = as_strings("marker_line_labels")
        d["metrics"] = [self.metric]
        if not self.metric:
            raise Exception(_("Pick a metric to display"))
        return d

    def get_data(self, df):
        """Shape the single-metric frame into the bullet-chart payload."""
        df["metric"] = df[[utils.get_metric_name(self.metric)]]
        values = df["metric"].values
        payload = {
            "measures": values.tolist(),
            "ranges": self.ranges or [0, values.max() * 1.1],
            "rangeLabels": self.range_labels or None,
            "markers": self.markers or None,
            "markerLabels": self.marker_labels or None,
            "markerLines": self.marker_lines or None,
            "markerLineLabels": self.marker_line_labels or None,
        }
        return payload
class BigNumberViz(BaseViz):
    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number"
    verbose_name = _("Big Number with Trendline")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = True

    def query_obj(self):
        """Require a metric and narrow the query down to it."""
        d = super().query_obj()
        metric = self.form_data.get("metric")
        if not metric:
            raise Exception(_("Pick a metric!"))
        # reuse the local rather than fetching the same form value again
        d["metrics"] = [metric]
        self.form_data["metric"] = metric
        return d
class BigNumberTotalViz(BaseViz):
    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number_total"
    verbose_name = _("Big Number")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def query_obj(self):
        """Require a metric; no row limit since a single cell comes back."""
        d = super().query_obj()
        metric = self.form_data.get("metric")
        if not metric:
            raise Exception(_("Pick a metric!"))
        d["metrics"] = [metric]
        self.form_data["metric"] = metric
        # Limiting rows is not required as only one cell is returned
        d["row_limit"] = None
        return d
class NVD3TimeSeriesViz(NVD3Viz):
    """A rich line chart component with tons of options"""
    # subclasses flip these flags to reuse process_data/to_series
    viz_type = "line"
    verbose_name = _("Time Series - Line Chart")
    sort_series = False
    is_timeseries = True
    def to_series(self, df, classed="", title_suffix=""):
        """Turn the pivoted dataframe into a list of NVD3 series dicts.

        Each numeric column becomes one series; all-NaN series are dropped.
        ``classed`` tags a CSS class, ``title_suffix`` is appended to the
        series key (used by the time-shift overlays).
        """
        cols = []
        for col in df.columns:
            if col == "":
                cols.append("N/A")
            elif col is None:
                cols.append("NULL")
            else:
                cols.append(col)
        df.columns = cols
        series = df.to_dict("series")
        chart_data = []
        for name in df.T.index.tolist():
            ys = series[name]
            # skip non-numeric columns (kinds: bool/int/uint/float/complex)
            if df[name].dtype.kind not in "biufc":
                continue
            if isinstance(name, list):
                series_title = [str(title) for title in name]
            elif isinstance(name, tuple):
                series_title = tuple(str(title) for title in name)
            else:
                series_title = str(name)
            if (
                isinstance(series_title, (list, tuple))
                and len(series_title) > 1
                and len(self.metric_labels) == 1
            ):
                # Removing metric from series name if only one metric
                series_title = series_title[1:]
            if title_suffix:
                if isinstance(series_title, str):
                    series_title = (series_title, title_suffix)
                elif isinstance(series_title, (list, tuple)):
                    series_title = series_title + (title_suffix,)
            values = []
            non_nan_cnt = 0
            for ds in df.index:
                if ds in ys:
                    d = {"x": ds, "y": ys[ds]}
                    if not np.isnan(ys[ds]):
                        non_nan_cnt += 1
                else:
                    d = {}
                values.append(d)
            # an all-NaN series carries no information; drop it
            if non_nan_cnt == 0:
                continue
            d = {"key": series_title, "values": values}
            if classed:
                d["classed"] = classed
            chart_data.append(d)
        return chart_data
    def process_data(self, df, aggregate=False):
        """Pivot on time and apply resample/rolling/contribution options.

        Raises when no time granularity is set. With ``aggregate`` the
        pivot sums duplicate rows (filling gaps with 0) instead of using
        pivot_table's default aggregation.
        """
        fd = self.form_data
        if fd.get("granularity") == "all":
            raise Exception(_("Pick a time granularity for your time series"))
        if aggregate:
            df = df.pivot_table(
                index=DTTM_ALIAS,
                columns=fd.get("groupby"),
                values=self.metric_labels,
                fill_value=0,
                aggfunc=sum,
            )
        else:
            df = df.pivot_table(
                index=DTTM_ALIAS, columns=fd.get("groupby"), values=self.metric_labels
            )
        rule = fd.get("resample_rule")
        method = fd.get("resample_method")
        if rule and method:
            # e.g. df.resample("1D").mean()
            df = getattr(df.resample(rule), method)()
        if self.sort_series:
            # reorder columns by descending series totals
            dfs = df.sum()
            dfs.sort_values(ascending=False, inplace=True)
            df = df[dfs.index]
        rolling_type = fd.get("rolling_type")
        rolling_periods = int(fd.get("rolling_periods") or 0)
        min_periods = int(fd.get("min_periods") or 0)
        if rolling_type in ("mean", "std", "sum") and rolling_periods:
            kwargs = dict(window=rolling_periods, min_periods=min_periods)
            if rolling_type == "mean":
                df = df.rolling(**kwargs).mean()
            elif rolling_type == "std":
                df = df.rolling(**kwargs).std()
            elif rolling_type == "sum":
                df = df.rolling(**kwargs).sum()
        elif rolling_type == "cumsum":
            df = df.cumsum()
        if min_periods:
            # drop leading rows where the rolling window is not full yet
            df = df[min_periods:]
        if fd.get("contribution"):
            # normalize each row so series show their share of the total
            dft = df.T
            df = (dft / dft.sum()).T
        return df
    def run_extra_queries(self):
        """Run one additional, shifted query per requested time comparison.

        Results accumulate in self._extra_chart_data as (label, df) pairs
        (attribute presumably initialized by the base class — confirm).
        """
        fd = self.form_data
        time_compare = fd.get("time_compare") or []
        # backwards compatibility
        if not isinstance(time_compare, list):
            time_compare = [time_compare]
        for option in time_compare:
            query_object = self.query_obj()
            delta = utils.parse_past_timedelta(option)
            query_object["inner_from_dttm"] = query_object["from_dttm"]
            query_object["inner_to_dttm"] = query_object["to_dttm"]
            if not query_object["from_dttm"] or not query_object["to_dttm"]:
                raise Exception(
                    _(
                        "`Since` and `Until` time bounds should be specified "
                        "when using the `Time Shift` feature."
                    )
                )
            query_object["from_dttm"] -= delta
            query_object["to_dttm"] -= delta
            df2 = self.get_df_payload(query_object, time_compare=option).get("df")
            if df2 is not None and DTTM_ALIAS in df2:
                label = "{} offset".format(option)
                # shift the comparison frame forward so it overlays the base
                df2[DTTM_ALIAS] += delta
                df2 = self.process_data(df2)
                self._extra_chart_data.append((label, df2))
    def get_data(self, df):
        """Build the chart payload, overlaying or diffing time-shifted series."""
        fd = self.form_data
        comparison_type = fd.get("comparison_type") or "values"
        df = self.process_data(df)
        if comparison_type == "values":
            # Filter out series with all NaN
            chart_data = self.to_series(df.dropna(axis=1, how="all"))
            for i, (label, df2) in enumerate(self._extra_chart_data):
                chart_data.extend(
                    self.to_series(
                        df2, classed="time-shift-{}".format(i), title_suffix=label
                    )
                )
        else:
            chart_data = []
            for i, (label, df2) in enumerate(self._extra_chart_data):
                # reindex df2 into the df2 index
                combined_index = df.index.union(df2.index)
                df2 = (
                    df2.reindex(combined_index)
                    .interpolate(method="time")
                    .reindex(df.index)
                )
                if comparison_type == "absolute":
                    diff = df - df2
                elif comparison_type == "percentage":
                    diff = (df - df2) / df2
                elif comparison_type == "ratio":
                    diff = df / df2
                else:
                    raise Exception(
                        "Invalid `comparison_type`: {0}".format(comparison_type)
                    )
                # remove leading/trailing NaNs from the time shift difference
                diff = diff[diff.first_valid_index() : diff.last_valid_index()]
                chart_data.extend(
                    self.to_series(
                        diff, classed="time-shift-{}".format(i), title_suffix=label
                    )
                )
        if not self.sort_series:
            # stable legend ordering by series key
            chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
        return chart_data
class MultiLineViz(NVD3Viz):
    """Pile on multiple line charts"""

    viz_type = "line_multi"
    verbose_name = _("Time Series - Multiple Line Charts")
    is_timeseries = True

    def query_obj(self):
        # no query of its own; this viz composes other saved slices
        return None

    def get_data(self, df):
        """Fetch and return the payloads of the slices picked for each axis."""
        fd = self.form_data
        # Late imports to avoid circular import issues
        from superset.models.core import Slice
        from superset import db

        # default to empty lists: `Slice.id.in_(None)` raises in SQLAlchemy
        slice_ids1 = fd.get("line_charts") or []
        slices1 = db.session.query(Slice).filter(Slice.id.in_(slice_ids1)).all()
        slice_ids2 = fd.get("line_charts_2") or []
        slices2 = db.session.query(Slice).filter(Slice.id.in_(slice_ids2)).all()
        return {
            "slices": {
                "axis1": [slc.data for slc in slices1],
                "axis2": [slc.data for slc in slices2],
            }
        }
class NVD3DualLineViz(NVD3Viz):
    """A rich line chart with dual axis"""

    viz_type = "dual_line"
    verbose_name = _("Time Series - Dual Axis Line Chart")
    sort_series = False
    is_timeseries = True

    def query_obj(self):
        """Validate that two distinct metrics were picked, one per axis."""
        d = super().query_obj()
        left = self.form_data.get("metric")
        right = self.form_data.get("metric_2")
        d["metrics"] = [left, right]
        if not left:
            raise Exception(_("Pick a metric for left axis!"))
        if not right:
            raise Exception(_("Pick a metric for right axis!"))
        if left == right:
            raise Exception(
                _("Please choose different metrics" " on left and right axis")
            )
        return d

    def to_series(self, df, classed=""):
        """Convert the pivoted frame into one NVD3 series per axis metric."""
        renamed = []
        for column in df.columns:
            if column == "":
                renamed.append("N/A")
            elif column is None:
                renamed.append("NULL")
            else:
                renamed.append(column)
        df.columns = renamed
        series = df.to_dict("series")
        chart_data = []
        metrics = [self.form_data.get("metric"), self.form_data.get("metric_2")]
        for axis_idx, metric in enumerate(metrics):
            metric = utils.get_metric_name(metric)
            ys = series[metric]
            # skip non-numeric columns
            if df[metric].dtype.kind not in "biufc":
                continue
            chart_data.append(
                {
                    "key": metric,
                    "classed": classed,
                    "values": [
                        {"x": ds, "y": ys[ds] if ds in ys else None}
                        for ds in df.index
                    ],
                    "yAxis": axis_idx + 1,
                    "type": "line",
                }
            )
        return chart_data

    def get_data(self, df):
        """Pivot on time and emit the two-axis chart payload."""
        fd = self.form_data
        if self.form_data.get("granularity") == "all":
            raise Exception(_("Pick a time granularity for your time series"))
        metric = utils.get_metric_name(fd.get("metric"))
        metric_2 = utils.get_metric_name(fd.get("metric_2"))
        df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
        return self.to_series(df)
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
    """A bar chart where the x axis is time"""
    viz_type = "bar"
    # bars read better when series are ordered by magnitude
    sort_series = True
    verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
    """Time Series - Periodicity Pivot"""
    viz_type = "time_pivot"
    sort_series = True
    verbose_name = _("Time Series - Period Pivot")
    def query_obj(self):
        # single-metric chart: narrow the query to the chosen metric
        d = super().query_obj()
        d["metrics"] = [self.form_data.get("metric")]
        return d
    def get_data(self, df):
        """Overlay each past period of the chosen frequency on one axis.

        Rows are ranked by how many periods back they fall ("current",
        "-1", "-2", ...) and every period is shifted onto the latest one
        so the series line up on a common time axis.
        """
        fd = self.form_data
        df = self.process_data(df)
        freq = to_offset(fd.get("freq"))
        try:
            freq = type(freq)(freq.n, normalize=True, **freq.kwds)
        except ValueError:
            # some pandas offsets don't accept normalize; retry without it
            freq = type(freq)(freq.n, **freq.kwds)
        df.index.name = None
        # anchor every timestamp to the start of its period
        df[DTTM_ALIAS] = df.index.map(freq.rollback)
        df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
        df.ranked = df.ranked.map(int)
        df["series"] = "-" + df.ranked.map(str)
        df["series"] = df["series"].str.replace("-0", "current")
        rank_lookup = {
            row["series"]: row["ranked"] for row in df.to_dict(orient="records")
        }
        max_ts = df[DTTM_ALIAS].max()
        max_rank = df["ranked"].max()
        # shift each period onto the most recent one
        df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
        df = df.pivot_table(
            index=DTTM_ALIAS,
            columns="series",
            values=utils.get_metric_name(fd.get("metric")),
        )
        chart_data = self.to_series(df)
        for serie in chart_data:
            serie["rank"] = rank_lookup[serie["key"]]
            # perc in (0, 1]: older periods get smaller values
            serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
        return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
    """A line chart component where you can compare the % change over time"""
    # inherits all data processing; only the frontend rendering differs
    viz_type = "compare"
    verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
    """A rich stack area chart"""
    viz_type = "area"
    verbose_name = _("Time Series - Stacked")
    # stacked areas read better when series are ordered by magnitude
    sort_series = True
class DistributionPieViz(NVD3Viz):
    """Annoy visualization snobs with this controversial pie chart"""

    viz_type = "pie"
    verbose_name = _("Distribution - NVD3 - Pie Chart")
    is_timeseries = False

    def get_data(self, df):
        """Pivot on the group-by columns and emit sorted x/y records."""
        metric = self.metric_labels[0]
        pivoted = df.pivot_table(index=self.groupby, values=[metric])
        pivoted = pivoted.sort_values(by=metric, ascending=False).reset_index()
        pivoted.columns = ["x", "y"]
        return pivoted.to_dict(orient="records")
class HistogramViz(BaseViz):
    """Histogram"""

    viz_type = "histogram"
    verbose_name = _("Histogram")
    is_timeseries = False

    def query_obj(self):
        """Returns the query object for this visualization"""
        d = super().query_obj()
        d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
        numeric_columns = self.form_data.get("all_columns_x")
        # `not` (rather than `is None`) also rejects an empty selection,
        # which would otherwise produce a useless query with no columns
        if not numeric_columns:
            raise Exception(_("Must have at least one numeric column specified"))
        self.columns = numeric_columns
        d["columns"] = numeric_columns + self.groupby
        # override groupby entry to avoid aggregation
        d["groupby"] = []
        return d

    def labelify(self, keys, column):
        """Compose a series label from the group keys and the column name."""
        if isinstance(keys, str):
            keys = (keys,)
        # removing undesirable characters
        labels = [re.sub(r"\W+", r"_", k) for k in keys]
        if len(self.columns) > 1 or not self.groupby:
            # Only show numeric column in label if there are many
            labels = [column] + labels
        return "__".join(labels)

    def get_data(self, df):
        """Returns the chart data"""
        chart_data = []
        if len(self.groupby) > 0:
            groups = df.groupby(self.groupby)
        else:
            # no grouping: treat the whole frame as a single anonymous group
            groups = [((), df)]
        for keys, data in groups:
            chart_data.extend(
                [
                    {
                        "key": self.labelify(keys, column),
                        "values": data[column].tolist(),
                    }
                    for column in self.columns
                ]
            )
        return chart_data
class DistributionBarViz(DistributionPieViz):
    """A good old bar chart"""
    viz_type = "dist_bar"
    verbose_name = _("Distribution - Bar Chart")
    is_timeseries = False
    def query_obj(self):
        """Validate series/breakdown columns don't overlap and a metric is set."""
        d = super().query_obj()
        fd = self.form_data
        # d["groupby"] was deduplicated upstream; a shorter list means overlap
        if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
            fd.get("columns") or []
        ):
            raise Exception(_("Can't have overlap between Series and Breakdowns"))
        if not fd.get("metrics"):
            raise Exception(_("Pick at least one metric"))
        if not fd.get("groupby"):
            raise Exception(_("Pick at least one field for [Series]"))
        return d
    def get_data(self, df):
        """Pivot into one series per (metric, breakdown) column pair."""
        fd = self.form_data
        metrics = self.metric_labels
        # order series by the first metric's group totals, descending
        row = df.groupby(self.groupby).sum()[metrics[0]].copy()
        row.sort_values(ascending=False, inplace=True)
        columns = fd.get("columns") or []
        pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
        if fd.get("contribution"):
            # normalize columns so bars show shares instead of totals
            pt = pt.T
            pt = (pt / pt.sum()).T
        pt = pt.reindex(row.index)
        chart_data = []
        for name, ys in pt.items():
            # skip non-numeric columns and any stray group-by column
            if pt[name].dtype.kind not in "biufc" or name in self.groupby:
                continue
            if isinstance(name, str):
                series_title = name
            else:
                # drop the metric label from the title when there's only one
                offset = 0 if len(metrics) > 1 else 1
                series_title = ", ".join([str(s) for s in name[offset:]])
            values = []
            for i, v in ys.items():
                x = i
                if isinstance(x, (tuple, list)):
                    x = ", ".join([str(s) for s in x])
                else:
                    x = str(x)
                values.append({"x": x, "y": v})
            d = {"key": series_title, "values": values}
            chart_data.append(d)
        return chart_data
class SunburstViz(BaseViz):
    """A multi level sunburst chart"""
    viz_type = "sunburst"
    verbose_name = _("Sunburst")
    is_timeseries = False
    credits = (
        "Kerry Rodden "
        '@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
    )
    def get_data(self, df):
        """Return rows as [level1, ..., levelN, m1, m2] value arrays.

        Without a distinct secondary metric, m2 mirrors m1 (presumably m1
        sizes the arcs and m2 drives the color scale — confirm against
        the frontend).
        """
        fd = self.form_data
        cols = fd.get("groupby")
        metric = utils.get_metric_name(fd.get("metric"))
        secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
        if metric == secondary_metric or secondary_metric is None:
            df.columns = cols + ["m1"]
            df["m2"] = df["m1"]
        return json.loads(df.to_json(orient="values"))
    def query_obj(self):
        """Query the primary metric, plus the secondary one when distinct."""
        qry = super().query_obj()
        fd = self.form_data
        qry["metrics"] = [fd["metric"]]
        secondary_metric = fd.get("secondary_metric")
        if secondary_metric and secondary_metric != fd["metric"]:
            qry["metrics"].append(secondary_metric)
        return qry
class SankeyViz(BaseViz):
    """A Sankey diagram that requires a parent-child dataset"""
    viz_type = "sankey"
    verbose_name = _("Sankey")
    is_timeseries = False
    credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
    def query_obj(self):
        # exactly two group-by columns: source and target of each link
        qry = super().query_obj()
        if len(qry["groupby"]) != 2:
            raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
        qry["metrics"] = [self.form_data["metric"]]
        return qry
    def get_data(self, df):
        """Emit source/target/value records, rejecting cyclic graphs."""
        df.columns = ["source", "target", "value"]
        df["source"] = df["source"].astype(str)
        df["target"] = df["target"].astype(str)
        recs = df.to_dict(orient="records")
        # adjacency map: source -> set of targets
        hierarchy = defaultdict(set)
        for row in recs:
            hierarchy[row["source"]].add(row["target"])
        def find_cycle(g):
            """Whether there's a cycle in a directed graph"""
            path = set()
            # DFS; returns the offending (vertex, neighbour) edge if found
            def visit(vertex):
                path.add(vertex)
                for neighbour in g.get(vertex, ()):
                    if neighbour in path or visit(neighbour):
                        return (vertex, neighbour)
                path.remove(vertex)
            for v in g:
                cycle = visit(v)
                if cycle:
                    return cycle
        cycle = find_cycle(hierarchy)
        if cycle:
            raise Exception(
                _(
                    "There's a loop in your Sankey, please provide a tree. "
                    "Here's a faulty link: {}"
                ).format(cycle)
            )
        return recs
class DirectedForceViz(BaseViz):
    """An animated directed force layout graph visualization"""

    viz_type = "directed_force"
    verbose_name = _("Directed Force Layout")
    credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
    is_timeseries = False

    def query_obj(self):
        """Require exactly two group-by columns (edge source and target)."""
        qry = super().query_obj()
        if len(self.form_data["groupby"]) != 2:
            raise Exception(_("Pick exactly 2 columns to 'Group By'"))
        qry["metrics"] = [self.form_data["metric"]]
        return qry

    def get_data(self, df):
        """Rename the columns to source/target/value and emit edge records."""
        df.columns = ["source", "target", "value"]
        return df.to_dict(orient="records")
class ChordViz(BaseViz):
    """A Chord diagram"""

    viz_type = "chord"
    # fixed copy-paste bug: this previously read "Directed Force Layout",
    # the verbose_name of DirectedForceViz
    verbose_name = _("Chord Diagram")
    credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
    is_timeseries = False

    def query_obj(self):
        """Group by the source/target pair and query the single metric."""
        qry = super().query_obj()
        fd = self.form_data
        qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
        qry["metrics"] = [utils.get_metric_name(fd.get("metric"))]
        return qry

    def get_data(self, df):
        """Build the symmetrical adjacency matrix d3.chord expects."""
        df.columns = ["source", "target", "value"]
        # Preparing a symetrical matrix like d3.chords calls for
        nodes = list(set(df["source"]) | set(df["target"]))
        matrix = {}
        for source, target in product(nodes, nodes):
            matrix[(source, target)] = 0
        for source, target, value in df.to_records(index=False):
            matrix[(source, target)] = value
        m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
        return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
    """A country centric"""

    viz_type = "country_map"
    verbose_name = _("Country Map")
    is_timeseries = False
    credits = "From bl.ocks.org By john-guerra"

    def query_obj(self):
        """Query the single metric, grouped on the region/entity column."""
        qry = super().query_obj()
        qry["metrics"] = [self.form_data["metric"]]
        qry["groupby"] = [self.form_data["entity"]]
        return qry

    def get_data(self, df):
        """Emit country_id/metric records for the map frontend."""
        fd = self.form_data
        metric = self.metric_labels[0]
        df = df[[fd.get("entity"), metric]]
        df.columns = ["country_id", "metric"]
        return df.to_dict(orient="records")
class WorldMapViz(BaseViz):
    """A country centric world map"""
    viz_type = "world_map"
    verbose_name = _("World Map")
    is_timeseries = False
    credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
    def query_obj(self):
        # group on the country/entity column only
        qry = super().query_obj()
        qry["groupby"] = [self.form_data["entity"]]
        return qry
    def get_data(self, df):
        """Map rows to country records enriched with cca3 code and lat/lng.

        Rows whose country can't be resolved get the "XXX" placeholder.
        """
        from superset.examples import countries
        fd = self.form_data
        cols = [fd.get("entity")]
        metric = utils.get_metric_name(fd.get("metric"))
        secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
        columns = ["country", "m1", "m2"]
        if metric == secondary_metric:
            # same metric on both channels: copy m1 into m2
            ndf = df[cols]
            ndf["m1"] = df[metric]
            ndf["m2"] = ndf["m1"]
        else:
            if secondary_metric:
                cols += [metric, secondary_metric]
            else:
                cols += [metric]
                columns = ["country", "m1"]
            ndf = df[cols]
        df = ndf
        df.columns = columns
        d = df.to_dict(orient="records")
        for row in d:
            country = None
            if isinstance(row["country"], str):
                country = countries.get(fd.get("country_fieldtype"), row["country"])
            if country:
                row["country"] = country["cca3"]
                row["latitude"] = country["lat"]
                row["longitude"] = country["lng"]
                row["name"] = country["name"]
            else:
                row["country"] = "XXX"
        return d
class FilterBoxViz(BaseViz):
    """A multi filter, multi-choice filter box to make dashboards interactive"""
    viz_type = "filter_box"
    verbose_name = _("Filters")
    is_timeseries = False
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    cache_type = "get_data"
    # cap on distinct values fetched per filter column
    filter_row_limit = 1000
    def query_obj(self):
        # no single query: one query per filter runs in run_extra_queries
        return None
    def run_extra_queries(self):
        """Fetch the distinct values (and optional metric) for each filter."""
        qry = super().query_obj()
        filters = self.form_data.get("filter_configs") or []
        qry["row_limit"] = self.filter_row_limit
        self.dataframes = {}
        for flt in filters:
            col = flt.get("column")
            if not col:
                raise Exception(
                    _("Invalid filter configuration, please select a column")
                )
            qry["groupby"] = [col]
            metric = flt.get("metric")
            qry["metrics"] = [metric] if metric else []
            df = self.get_df_payload(query_obj=qry).get("df")
            self.dataframes[col] = df
    def get_data(self, df):
        """Build per-column choice lists, sorted by metric or by value."""
        filters = self.form_data.get("filter_configs") or []
        d = {}
        for flt in filters:
            col = flt.get("column")
            metric = flt.get("metric")
            df = self.dataframes.get(col)
            if metric:
                df = df.sort_values(
                    utils.get_metric_name(metric), ascending=flt.get("asc")
                )
                d[col] = [
                    {"id": row[0], "text": row[0], "metric": row[1]}
                    for row in df.itertuples(index=False)
                ]
            else:
                df = df.sort_values(col, ascending=flt.get("asc"))
                d[col] = [
                    {"id": row[0], "text": row[0]} for row in df.itertuples(index=False)
                ]
        return d
class IFrameViz(BaseViz):
    """You can squeeze just about anything in this iFrame component"""

    viz_type = "iframe"
    verbose_name = _("iFrame")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def query_obj(self):
        """No query: the iframe just renders an external URL."""
        return None

    def get_df(self, query_obj: Dict[str, Any] = None) -> Optional[pd.DataFrame]:
        """No dataframe backs this viz."""
        return None

    def get_data(self, df):
        """Empty payload; the frontend only needs the form data."""
        return {}
class ParallelCoordinatesViz(BaseViz):
    """Interactive parallel coordinate implementation

    Uses this amazing javascript library
    https://github.com/syntagmatic/parallel-coordinates
    """

    viz_type = "para"
    verbose_name = _("Parallel Coordinates")
    credits = (
        '<a href="https://syntagmatic.github.io/parallel-coordinates/">'
        "Syntagmatic's library</a>"
    )
    is_timeseries = False

    def query_obj(self):
        """Group on the selected series column."""
        d = super().query_obj()
        d["groupby"] = [self.form_data.get("series")]
        return d

    def get_data(self, df):
        """Hand the raw rows straight to the frontend."""
        return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
    """A nice heatmap visualization that support high density through canvas"""
    viz_type = "heatmap"
    verbose_name = _("Heatmap")
    is_timeseries = False
    credits = (
        'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
        "bl.ocks.org</a>"
    )
    def query_obj(self):
        # the two axes become the group-by columns; one metric colors cells
        d = super().query_obj()
        fd = self.form_data
        d["metrics"] = [fd.get("metric")]
        d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
        return d
    def get_data(self, df):
        """Emit x/y/v records plus per-cell percentage and rank columns.

        Normalization runs either over the whole heatmap or within each
        group of the `normalize_across` dimension.
        """
        fd = self.form_data
        x = fd.get("all_columns_x")
        y = fd.get("all_columns_y")
        v = self.metric_labels[0]
        if x == y:
            # same column on both axes: the frame already has exactly 3 cols
            df.columns = ["x", "y", "v"]
        else:
            df = df[[x, y, v]]
            df.columns = ["x", "y", "v"]
        norm = fd.get("normalize_across")
        overall = False
        max_ = df.v.max()
        min_ = df.v.min()
        if norm == "heatmap":
            overall = True
        else:
            gb = df.groupby(norm, group_keys=False)
            if len(gb) <= 1:
                # a single group is equivalent to normalizing overall
                overall = True
            else:
                # min-max scale and percentile-rank within each group
                df["perc"] = gb.apply(
                    lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
                )
                df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
        if overall:
            df["perc"] = (df.v - min_) / (max_ - min_)
            df["rank"] = df.v.rank(pct=True)
        return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
    """Horizon chart
    https://www.npmjs.com/package/d3-horizon-chart
    """
    # reuses NVD3TimeSeriesViz data processing; only the rendering differs
    viz_type = "horizon"
    verbose_name = _("Horizon Charts")
    credits = (
        '<a href="https://www.npmjs.com/package/d3-horizon-chart">'
        "d3-horizon-chart</a>"
    )
class MapboxViz(BaseViz):
    """Rich maps made with Mapbox"""
    viz_type = "mapbox"
    verbose_name = _("Mapbox")
    is_timeseries = False
    credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
    def query_obj(self):
        """Validate lon/lat/label/radius selections for both query modes.

        Without a group-by the raw columns are fetched; with one, every
        chosen column must also be part of the group-by.
        """
        d = super().query_obj()
        fd = self.form_data
        label_col = fd.get("mapbox_label")
        if not fd.get("groupby"):
            if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
                raise Exception(_("[Longitude] and [Latitude] must be set"))
            d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
            if label_col and len(label_col) >= 1:
                if label_col[0] == "count":
                    raise Exception(
                        _(
                            "Must have a [Group By] column to have 'count' as the "
                            + "[Label]"
                        )
                    )
                d["columns"].append(label_col[0])
            if fd.get("point_radius") != "Auto":
                d["columns"].append(fd.get("point_radius"))
            # dedupe in case label/radius repeat the coordinate columns
            d["columns"] = list(set(d["columns"]))
        else:
            # Ensuring columns chosen are all in group by
            if (
                label_col
                and len(label_col) >= 1
                and label_col[0] != "count"
                and label_col[0] not in fd.get("groupby")
            ):
                raise Exception(_("Choice of [Label] must be present in [Group By]"))
            if fd.get("point_radius") != "Auto" and fd.get(
                "point_radius"
            ) not in fd.get("groupby"):
                raise Exception(
                    _("Choice of [Point Radius] must be present in [Group By]")
                )
            if fd.get("all_columns_x") not in fd.get("groupby") or fd.get(
                "all_columns_y"
            ) not in fd.get("groupby"):
                raise Exception(
                    _(
                        "[Longitude] and [Latitude] columns must be present in "
                        + "[Group By]"
                    )
                )
        return d
    def get_data(self, df):
        """Build a geoJSON FeatureCollection plus map configuration."""
        if df is None:
            return None
        fd = self.form_data
        label_col = fd.get("mapbox_label")
        has_custom_metric = label_col is not None and len(label_col) > 0
        metric_col = [None] * len(df.index)
        if has_custom_metric:
            # the label may reuse a coordinate column or name its own
            if label_col[0] == fd.get("all_columns_x"):
                metric_col = df[fd.get("all_columns_x")]
            elif label_col[0] == fd.get("all_columns_y"):
                metric_col = df[fd.get("all_columns_y")]
            else:
                metric_col = df[label_col[0]]
        point_radius_col = (
            [None] * len(df.index)
            if fd.get("point_radius") == "Auto"
            else df[fd.get("point_radius")]
        )
        # limiting geo precision as long decimal values trigger issues
        # around json-bignumber in Mapbox
        GEO_PRECISION = 10
        # using geoJSON formatting
        geo_json = {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "properties": {"metric": metric, "radius": point_radius},
                    "geometry": {
                        "type": "Point",
                        "coordinates": [
                            round(lon, GEO_PRECISION),
                            round(lat, GEO_PRECISION),
                        ],
                    },
                }
                for lon, lat, metric, point_radius in zip(
                    df[fd.get("all_columns_x")],
                    df[fd.get("all_columns_y")],
                    metric_col,
                    point_radius_col,
                )
            ],
        }
        x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
        south_west = [x_series.min(), y_series.min()]
        north_east = [x_series.max(), y_series.max()]
        return {
            "geoJSON": geo_json,
            "hasCustomMetric": has_custom_metric,
            "mapboxApiKey": config["MAPBOX_API_KEY"],
            "mapStyle": fd.get("mapbox_style"),
            "aggregatorName": fd.get("pandas_aggfunc"),
            "clusteringRadius": fd.get("clustering_radius"),
            "pointRadiusUnit": fd.get("point_radius_unit"),
            "globalOpacity": fd.get("global_opacity"),
            "bounds": [south_west, north_east],
            "renderWhileDragging": fd.get("render_while_dragging"),
            "tooltip": fd.get("rich_tooltip"),
            "color": fd.get("mapbox_color"),
        }
class DeckGLMultiLayer(BaseViz):
    """Pile on multiple DeckGL layers"""

    viz_type = "deck_multi"
    verbose_name = _("Deck.gl - Multiple Layers")
    is_timeseries = False
    credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'

    def query_obj(self):
        # no query of its own; each referenced slice queries for itself
        return None

    def get_data(self, df):
        """Return the map config and payloads of the selected deck.gl slices."""
        fd = self.form_data
        # Late imports to avoid circular import issues
        from superset.models.core import Slice
        from superset import db

        # default to an empty list: `Slice.id.in_(None)` raises in SQLAlchemy
        slice_ids = fd.get("deck_slices") or []
        slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
        return {
            "mapboxApiKey": config["MAPBOX_API_KEY"],
            "slices": [slc.data for slc in slices],
        }
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self):
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key, group_by):
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
@staticmethod
def parse_coordinates(s):
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude) # pylint: disable=no-member
except Exception:
raise SpatialException(_("Invalid spatial point encountered: %s" % s))
@staticmethod
def reverse_geohash_decode(geohash_code):
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
@staticmethod
def reverse_latlong(df, key):
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key, df):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self):
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self):
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd.get("dimension")]
if fd.get("js_columns"):
gb += fd.get("js_columns")
metrics = self.get_metrics()
gb = list(set(gb))
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
else:
d["columns"] = gb
return d
def get_js_columns(self, d):
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df):
if df is None:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d):
    """Map one result row to a deck.gl feature dict.

    Subclasses must override this; the base implementation always raises.
    """
    raise NotImplementedError()
class DeckScatterViz(BaseDeckGLViz):

    """deck.gl's ScatterLayer"""

    viz_type = "deck_scatter"
    verbose_name = _("Deck.gl - Scatter plot")
    spatial_control_keys = ["spatial"]
    is_timeseries = True

    def query_obj(self):
        """Build the query, resolving time-series mode and the radius control."""
        fd = self.form_data
        self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
        self.point_radius_fixed = fd.get("point_radius_fixed") or {
            "type": "fix",
            "value": 500,
        }
        return super().query_obj()

    def get_metrics(self):
        """Return the radius metric when one is configured, else None."""
        self.metric = None
        if self.point_radius_fixed.get("type") == "metric":
            self.metric = self.point_radius_fixed.get("value")
            return [self.metric]
        return None

    def get_properties(self, d):
        """Map one row to a scatter feature: position, radius, color, time."""
        return {
            "metric": d.get(self.metric_label),
            "radius": self.fixed_value
            if self.fixed_value
            else d.get(self.metric_label),
            "cat_color": d.get(self.dim) if self.dim else None,
            "position": d.get("spatial"),
            DTTM_ALIAS: d.get(DTTM_ALIAS),
        }

    def get_data(self, df):
        fd = self.form_data
        self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
        # Fall back to the same default as query_obj(): the raw control can be
        # None, which previously caused an AttributeError on .get() below.
        self.point_radius_fixed = fd.get("point_radius_fixed") or {
            "type": "fix",
            "value": 500,
        }
        self.fixed_value = None
        self.dim = self.form_data.get("dimension")
        if self.point_radius_fixed.get("type") != "metric":
            self.fixed_value = self.point_radius_fixed.get("value")
        return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):

    """deck.gl's ScreenGridLayer"""

    viz_type = "deck_screengrid"
    verbose_name = _("Deck.gl - Screen Grid")
    spatial_control_keys = ["spatial"]
    is_timeseries = True

    def query_obj(self):
        fd = self.form_data
        # Cast to bool for consistency with the other deck.gl viz classes
        # (previously the raw control value was stored).
        self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
        return super().query_obj()

    def get_properties(self, d):
        """Map one row to a screen-grid point: position, weight and timestamp."""
        return {
            "position": d.get("spatial"),
            "weight": d.get(self.metric_label) or 1,
            "__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
        }

    def get_data(self, df):
        # Resolve the metric label once before the base class renders features.
        self.metric_label = utils.get_metric_name(self.metric)
        return super().get_data(df)
class DeckGrid(BaseDeckGLViz):

    """deck.gl's DeckLayer"""

    viz_type = "deck_grid"
    verbose_name = _("Deck.gl - 3D Grid")
    spatial_control_keys = ["spatial"]

    def get_properties(self, d):
        """Map one row to a grid cell: position plus weight (default 1)."""
        weight = d.get(self.metric_label) or 1
        return {"position": d.get("spatial"), "weight": weight}

    def get_data(self, df):
        # Resolve the metric label once before features are rendered.
        self.metric_label = utils.get_metric_name(self.metric)
        return super().get_data(df)
def geohash_to_json(geohash_code):
    """Convert a geohash into a closed polygon ring of [lon, lat] corners.

    The ring starts and ends at the NW corner, as GeoJSON-style polygons
    require a closed loop.
    """
    bbox = geohash.bbox(geohash_code)
    west, east = bbox.get("w"), bbox.get("e")
    north, south = bbox.get("n"), bbox.get("s")
    return [
        [west, north],
        [east, north],
        [east, south],
        [west, south],
        [west, north],
    ]
class DeckPathViz(BaseDeckGLViz):

    """deck.gl's PathLayer"""

    viz_type = "deck_path"
    verbose_name = _("Deck.gl - Paths")
    deck_viz_key = "path"
    is_timeseries = True
    # Decoder for each supported line encoding.
    deser_map = {
        "json": json.loads,
        "polyline": polyline.decode,
        "geohash": geohash_to_json,
    }

    def query_obj(self):
        fd = self.form_data
        # Cast to bool for consistency with the other deck.gl viz classes.
        self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
        d = super().query_obj()
        self.metric = fd.get("metric")
        line_col = fd.get("line_column")
        if d["metrics"]:
            # With metrics, the line column must be part of the grouping.
            self.has_metrics = True
            d["groupby"].append(line_col)
        else:
            self.has_metrics = False
            d["columns"].append(line_col)
        return d

    def get_properties(self, d):
        """Decode one row's line column into a path and attach a timestamp."""
        fd = self.form_data
        line_type = fd.get("line_type")
        deser = self.deser_map[line_type]
        line_column = fd.get("line_column")
        path = deser(d[line_column])
        if fd.get("reverse_long_lat"):
            path = [(o[1], o[0]) for o in path]
        d[self.deck_viz_key] = path
        # geohash decoding reads the column in place; other encodings replace it.
        if line_type != "geohash":
            del d[line_column]
        d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
        return d

    def get_data(self, df):
        self.metric_label = utils.get_metric_name(self.metric)
        return super().get_data(df)
class DeckPolygon(DeckPathViz):

    """deck.gl's Polygon Layer"""

    viz_type = "deck_polygon"
    deck_viz_key = "polygon"
    verbose_name = _("Deck.gl - Polygon")

    def query_obj(self):
        fd = self.form_data
        # Same default shape as DeckScatterViz's point_radius_fixed control.
        self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
        return super().query_obj()

    def get_metrics(self):
        """Return the main metric plus the elevation metric, when configured."""
        metrics = [self.form_data.get("metric")]
        if self.elevation.get("type") == "metric":
            metrics.append(self.elevation.get("value"))
        return [metric for metric in metrics if metric]

    def get_properties(self, d):
        super().get_properties(d)
        # Use self.elevation, which query_obj() resolved with a default,
        # instead of re-indexing form_data (raised KeyError when the
        # point_radius_fixed control was unset).
        elevation = self.elevation["value"]
        type_ = self.elevation["type"]
        d["elevation"] = (
            d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
        )
        return d
class DeckHex(BaseDeckGLViz):

    """deck.gl's DeckLayer"""

    viz_type = "deck_hex"
    verbose_name = _("Deck.gl - 3D HEX")
    spatial_control_keys = ["spatial"]

    def get_properties(self, d):
        """Map one row to a hex-bin point: position plus weight (default 1)."""
        return {"position": d.get("spatial"), "weight": d.get(self.metric_label) or 1}

    def get_data(self, df):
        self.metric_label = utils.get_metric_name(self.metric)
        # Zero-argument super() for consistency with the rest of the module
        # (the explicit super(DeckHex, self) form predates Python 3).
        return super().get_data(df)
class DeckGeoJson(BaseDeckGLViz):

    """deck.gl's GeoJSONLayer"""

    viz_type = "deck_geojson"
    verbose_name = _("Deck.gl - GeoJSON")

    def query_obj(self):
        d = super().query_obj()
        # A GeoJSON layer needs only the raw geojson column, no aggregation.
        d["columns"].append(self.form_data.get("geojson"))
        d["metrics"] = []
        d["groupby"] = []
        return d

    def get_properties(self, d):
        """Parse the row's geojson column into a feature dict."""
        return json.loads(d.get(self.form_data.get("geojson")))
class DeckArc(BaseDeckGLViz):

    """deck.gl's Arc Layer"""

    viz_type = "deck_arc"
    verbose_name = _("Deck.gl - Arc")
    spatial_control_keys = ["start_spatial", "end_spatial"]
    is_timeseries = True

    def query_obj(self):
        """Flag the query as a timeseries when a time grain is configured."""
        form_data = self.form_data
        self.is_timeseries = bool(
            form_data.get("time_grain_sqla") or form_data.get("granularity")
        )
        return super().query_obj()

    def get_properties(self, d):
        """Map one row to an arc: source/target positions, color and time."""
        dimension = self.form_data.get("dimension")
        return {
            "sourcePosition": d.get("start_spatial"),
            "targetPosition": d.get("end_spatial"),
            "cat_color": d.get(dimension) if dimension else None,
            DTTM_ALIAS: d.get(DTTM_ALIAS),
        }

    def get_data(self, df):
        """Strip the payload down to features plus the Mapbox API key."""
        payload = super().get_data(df)
        return {
            "features": payload["features"],
            "mapboxApiKey": config["MAPBOX_API_KEY"],
        }
class EventFlowViz(BaseViz):

    """A visualization to explore patterns in event sequences"""

    viz_type = "event_flow"
    verbose_name = _("Event flow")
    credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
    is_timeseries = True

    def query_obj(self):
        """Build the query: event + entity columns first, then metadata columns."""
        query = super().query_obj()
        form_data = self.form_data
        event_key = form_data.get("all_columns_x")
        entity_key = form_data.get("entity")
        # Guard against a missing all_columns control (previously a TypeError).
        meta_keys = [
            col
            for col in form_data.get("all_columns") or []
            if col not in (event_key, entity_key)
        ]
        query["columns"] = [event_key, entity_key] + meta_keys
        # .get() instead of [] so an absent control doesn't raise KeyError.
        if form_data.get("order_by_entity"):
            query["orderby"] = [(entity_key, True)]
        return query

    def get_data(self, df):
        """Return the raw rows; the client-side component does the layout."""
        return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):

    """A table displaying paired t-test values"""

    viz_type = "paired_ttest"
    verbose_name = _("Time Series - Paired t-test")
    sort_series = False
    is_timeseries = True

    def get_data(self, df):
        """
        Transform received data frame into an object of the form:
        {
            'metric1': [
                {
                    groups: ('groupA', ... ),
                    values: [ {x, y}, ... ],
                }, ...
            ], ...
        }
        """
        fd = self.form_data
        df = df.pivot_table(
            index=DTTM_ALIAS,
            columns=fd.get("groupby"),
            values=self.metric_labels,
        )
        # Replace falsey column keys with printable placeholders.
        df.columns = [
            "N/A" if col == "" else "NULL" if col is None else col
            for col in df.columns
        ]
        data = {}
        series = df.to_dict("series")
        for name_set in df.columns:
            # If no groups are defined, name_set is just the metric name.
            has_group = not isinstance(name_set, str)
            ys = series[name_set]
            entry = {
                "group": name_set[1:] if has_group else "All",
                "values": [
                    {"x": t, "y": ys[t] if t in ys else None} for t in df.index
                ],
            }
            metric = name_set[0] if has_group else name_set
            data.setdefault(metric, []).append(entry)
        return data
class RoseViz(NVD3TimeSeriesViz):

    """Nightingale rose chart: buckets time-series points by timestamp."""

    viz_type = "rose"
    verbose_name = _("Time Series - Nightingale Rose Chart")
    sort_series = False
    is_timeseries = True

    def get_data(self, df):
        """Regroup the NVD3 series output into {timestamp: [slice, ...]}."""
        series_list = super().get_data(df)
        buckets = {}
        for series in series_list:
            series_key = series["key"]
            display_name = (
                ", ".join(series_key) if isinstance(series_key, list) else series_key
            )
            for point in series["values"]:
                ts = point["x"].value
                y = point["y"]
                buckets.setdefault(ts, []).append(
                    {
                        "key": series_key,
                        # NaN values are rendered as zero-size slices.
                        "value": 0 if math.isnan(y) else y,
                        "name": display_name,
                        "time": point["x"],
                    }
                )
        return buckets
class PartitionViz(NVD3TimeSeriesViz):

    """
    A hierarchical data visualization with support for time series.
    """

    viz_type = "partition"
    verbose_name = _("Partition Diagram")

    def query_obj(self):
        query_obj = super().query_obj()
        time_op = self.form_data.get("time_series_option", "not_time")
        # Return time series data if the user specifies so
        query_obj["is_timeseries"] = time_op != "not_time"
        return query_obj

    def levels_for(self, time_op, groups, df):
        """
        Compute the partition at each `level` from the dataframe.

        Level 0 is the grand total; level i aggregates over the first i
        groupby columns. time_op selects mean vs sum aggregation.
        """
        levels = {}
        for i in range(0, len(groups) + 1):
            agg_df = df.groupby(groups[:i]) if i else df
            levels[i] = (
                agg_df.mean()
                if time_op == "agg_mean"
                else agg_df.sum(numeric_only=True)
            )
        return levels

    def levels_for_diff(self, time_op, groups, df):
        """Compute per-level change between the first and last time grain."""
        # Obtain a unique list of the time grains
        times = list(set(df[DTTM_ALIAS]))
        times.sort()
        until = times[len(times) - 1]
        since = times[0]
        # Function describing how to calculate the difference
        # Each entry: [pandas Series op, scalar fallback op for level 0].
        func = {
            "point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
            "point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
            "point_percent": [
                lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
                lambda a, b, fill_value: a / float(b) - 1,
            ],
        }[time_op]
        agg_df = df.groupby(DTTM_ALIAS).sum()
        levels = {
            0: pd.Series(
                {
                    m: func[1](agg_df[m][until], agg_df[m][since], 0)
                    for m in agg_df.columns
                }
            )
        }
        for i in range(1, len(groups) + 1):
            agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
            levels[i] = pd.DataFrame(
                {
                    m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
                    for m in agg_df.columns
                }
            )
        return levels

    def levels_for_time(self, groups, df):
        """Run the NVD3 time-series pipeline once per grouping level."""
        procs = {}
        for i in range(0, len(groups) + 1):
            # Temporarily narrow the groupby so process_data() aggregates
            # at this level; restored after the loop.
            self.form_data["groupby"] = groups[:i]
            # NOTE(review): positional `axis` argument is deprecated in newer
            # pandas; equivalent to df.drop(columns=groups[i:]).
            df_drop = df.drop(groups[i:], 1)
            procs[i] = self.process_data(df_drop, aggregate=True)
        self.form_data["groupby"] = groups
        return procs

    def nest_values(self, levels, level=0, metric=None, dims=()):
        """
        Nest values at each level on the back-end with
        access and setting, instead of summing from the bottom.
        """
        # Level 0: one node per metric.
        if not level:
            return [
                {
                    "name": m,
                    "val": levels[0][m],
                    "children": self.nest_values(levels, 1, m),
                }
                for m in levels[0].index
            ]
        # Level 1: one node per first-groupby value for the given metric.
        if level == 1:
            return [
                {
                    "name": i,
                    "val": levels[1][metric][i],
                    "children": self.nest_values(levels, 2, metric, (i,)),
                }
                for i in levels[1][metric].index
            ]
        if level >= len(levels):
            return []
        # Deeper levels: index by the accumulated dims tuple (MultiIndex).
        return [
            {
                "name": i,
                "val": levels[level][metric][dims][i],
                "children": self.nest_values(levels, level + 1, metric, dims + (i,)),
            }
            for i in levels[level][metric][dims].index
        ]

    def nest_procs(self, procs, level=-1, dims=(), time=None):
        # Same shape as nest_values() but over the per-time-grain dataframes
        # from levels_for_time(); level -1 is the metric layer.
        if level == -1:
            return [
                {"name": m, "children": self.nest_procs(procs, 0, (m,))}
                for m in procs[0].columns
            ]
        if not level:
            return [
                {
                    "name": t,
                    "val": procs[0][dims[0]][t],
                    "children": self.nest_procs(procs, 1, dims, t),
                }
                for t in procs[0].index
            ]
        if level >= len(procs):
            return []
        return [
            {
                "name": i,
                "val": procs[level][dims][i][time],
                "children": self.nest_procs(procs, level + 1, dims + (i,), time),
            }
            for i in procs[level][dims].columns
        ]

    def get_data(self, df):
        """Dispatch to the appropriate level builder per time_series_option."""
        fd = self.form_data
        groups = fd.get("groupby", [])
        time_op = fd.get("time_series_option", "not_time")
        if not len(groups):
            raise ValueError("Please choose at least one groupby")
        if time_op == "not_time":
            levels = self.levels_for("agg_sum", groups, df)
        elif time_op in ["agg_sum", "agg_mean"]:
            levels = self.levels_for(time_op, groups, df)
        elif time_op in ["point_diff", "point_factor", "point_percent"]:
            levels = self.levels_for_diff(time_op, groups, df)
        elif time_op == "adv_anal":
            procs = self.levels_for_time(groups, df)
            return self.nest_procs(procs)
        else:
            # Fallback: aggregate with the time grain as the outermost level.
            levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
        return self.nest_values(levels)
# Registry mapping each viz_type string to its BaseViz subclass, built from
# every class defined in this module, excluding blacklisted types.
viz_types = {
    o.viz_type: o
    for o in globals().values()
    if (
        inspect.isclass(o)
        and issubclass(o, BaseViz)
        and o.viz_type not in config["VIZ_TYPE_BLACKLIST"]
    )
}
| zhouyao1994/incubator-superset | superset/viz.py | Python | apache-2.0 | 91,799 | [
"VisIt"
] | 0bb53b189dda45c4c4c74b10a21c0234d50515ee980e6a6a28009812711d5f71 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for the Google Chrome Cookie database."""
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
# pylint: disable=unused-import
from plaso.parsers import cookie_plugins
from plaso.parsers.cookie_plugins import interface as cookie_interface
from plaso.parsers.sqlite_plugins import interface
class ChromeCookieEvent(event.WebKitTimeEvent):
  """Convenience class for a Chrome Cookie event."""

  DATA_TYPE = 'chrome:cookie:entry'

  def __init__(self, timestamp, usage, hostname, cookie_name, value,
               path, secure, httponly, persistent):
    """Initializes the event.

    Args:
      timestamp: The timestamp value in WebKit format.
      usage: Timestamp description string.
      hostname: The hostname of host that set the cookie value.
      cookie_name: The name field of the cookie.
      value: The value of the cookie.
      path: An URI of the page that set the cookie.
      secure: Indication if this cookie should only be transmitted over a
          secure channel.
      httponly: An indication that the cookie cannot be accessed through
          client side script.
      persistent: A flag indicating cookies persistent value.
    """
    super(ChromeCookieEvent, self).__init__(timestamp, usage)
    # Normalize domain cookies: drop a single leading dot from the host.
    self.host = hostname[1:] if hostname.startswith('.') else hostname
    self.cookie_name = cookie_name
    self.data = value
    self.path = path
    self.secure = bool(secure)
    self.httponly = bool(httponly)
    self.persistent = bool(persistent)
    scheme = u'https' if self.secure else u'http'
    self.url = u'{}://{}{}'.format(scheme, self.host, path)
class ChromeCookiePlugin(interface.SQLitePlugin):
  """Parse Chrome Cookies file."""

  NAME = 'chrome_cookies'

  # Define the needed queries.
  QUERIES = [(('SELECT creation_utc, host_key, name, value, path, expires_utc,'
               'secure, httponly, last_access_utc, has_expires, persistent '
               'FROM cookies'), 'ParseCookieRow')]

  # The required tables common to Archived History and History.
  REQUIRED_TABLES = frozenset(['cookies', 'meta'])

  # Point to few sources for URL information.
  URLS = [
      u'http://src.chromium.org/svn/trunk/src/net/cookies/',
      (u'http://www.dfinews.com/articles/2012/02/'
       u'google-analytics-cookies-and-forensic-implications')]

  # Google Analytics __utmz variable translation.
  # Taken from:
  # http://www.dfinews.com/sites/dfinews.com/files/u739/Tab2Cookies020312.jpg
  GA_UTMZ_TRANSLATION = {
      'utmcsr': 'Last source used to access.',
      'utmccn': 'Ad campaign information.',
      'utmcmd': 'Last type of visit.',
      'utmctr': 'Keywords used to find site.',
      'utmcct': 'Path to the page of referring link.'}

  def __init__(self, pre_obj):
    """Initialize the plugin.

    Args:
      pre_obj: The preprocessing object.
    """
    super(ChromeCookiePlugin, self).__init__(pre_obj)
    # Cookie plugins (e.g. Google Analytics parsers) that can extract extra
    # events from individual cookie values.
    self._cookie_plugins = cookie_interface.GetPlugins(
        pre_obj, ChromeCookieEvent.DATA_TYPE)

  def ParseCookieRow(self, row, **unused_kwargs):
    """Parses a cookie row.

    Args:
      row: The row resulting from the query.

    Yields:
      An event object (instance of ChromeCookieEvent) containing the event
      data.
    """
    # One event per timestamp stored in the row: creation, last access and,
    # when present, expiration.
    yield ChromeCookieEvent(
        row['creation_utc'], eventdata.EventTimestamp.CREATION_TIME,
        row['host_key'], row['name'], row['value'], row['path'], row['secure'],
        row['httponly'], row['persistent'])

    yield ChromeCookieEvent(
        row['last_access_utc'], eventdata.EventTimestamp.ACCESS_TIME,
        row['host_key'], row['name'], row['value'], row['path'], row['secure'],
        row['httponly'], row['persistent'])

    if row['has_expires']:
      yield ChromeCookieEvent(
          row['expires_utc'], 'Cookie Expires',
          row['host_key'], row['name'], row['value'], row['path'],
          row['secure'], row['httponly'], row['persistent'])

    # Go through all cookie plugins to see if there are is any specific parsing
    # needed.
    for cookie_plugin in self._cookie_plugins:
      try:
        for event_object in cookie_plugin.Process(
            cookie_name=row['name'], cookie_data=row['value']):
          # TODO: Reduce repeated code and combine with ChromeCookieEvent.
          # This might need to take place after EventObject redesign
          # to make sure it can be done (side effect of removing conatiners).
          event_object.httponly = True if row['httponly'] else False
          event_object.persistent = True if row['persistent'] else False
          event_object.cookie_name = row['name']
          hostname = row['host_key']
          if hostname.startswith('.'):
            hostname = hostname[1:]
          event_object.host = hostname
          event_object.url = u'http{:s}://{:s}{:s}'.format(
              u's' if row['secure'] else u'', hostname, row['path'])
          yield event_object
      except errors.WrongPlugin:
        # A plugin that doesn't recognize this cookie simply declines it.
        pass
| iwm911/plaso | plaso/parsers/sqlite_plugins/chrome_cookies.py | Python | apache-2.0 | 5,703 | [
"VisIt"
] | f9f05c4acb89a831fd24a9c2b0fe3bf34fd254280d57c0ef4fca7df5b5db10dd |
"""Contains the base class for the Snux driver overlay.
This class overlays an existing WPC-compatible platform interface to work with
Mark Sunnucks's System 11 interface board.
"""
# snux.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
from mpf.system.tasks import DelayManager
from mpf.system.timing import Timer
class Snux(object):

    """Platform overlay driving Mark Sunnucks's System 11 interface board.

    Wraps an existing WPC-compatible platform so that A/C-switched coils
    (two coils sharing one driver line through a select relay) can be used
    safely: requests are queued per side and the A/C relay is only toggled
    when the opposite side is idle and the relay transition delay has
    elapsed.
    """

    def __init__(self, machine, platform):
        self.log = logging.getLogger('Platform.Snux')
        self.delay = DelayManager()
        self.machine = machine
        self.platform = platform
        self.system11_config = None
        self.snux_config = None
        self.ac_relay_delay_ms = 100
        self.special_drivers = set()
        self.diag_led = None
        '''Diagnostics LED (LED 3) on the Snux board turns on solid when MPF
        first connects, then starts flashing once the MPF init is done.'''
        self.ac_relay = None
        self.flipper_relay = None
        self.ac_relay_enabled = False  # disabled = A, enabled = C
        self.a_side_queue = set()
        self.c_side_queue = set()
        self.a_drivers = set()
        self.c_drivers = set()
        self.a_side_done_time = 0
        self.c_side_done_time = 0
        self.drivers_holding_a_side = set()
        self.drivers_holding_c_side = set()
        # self.a_side_busy = False  # This is a property
        # self.c_side_active = False  # This is a property
        self.a_side_enabled = True
        self.c_side_enabled = False
        self.ac_relay_in_transition = False
        self._morph()

    @property
    def a_side_busy(self):
        # True while A-side drivers are held on, a timed A-side pulse is
        # still running, or A-side requests are waiting to be serviced.
        if (self.drivers_holding_a_side or
                self.a_side_done_time > time.time() or
                self.a_side_queue):
            return True
        else:
            return False

    @property
    def c_side_active(self):
        # True while C-side drivers are held on or a timed C-side pulse is
        # still running.
        if self.drivers_holding_c_side or self.c_side_done_time > time.time():
            return True
        else:
            return False

    def null_log_handler(self, *args, **kwargs):
        # Used to silence the diag LED driver's chatty log methods.
        pass

    def _morph(self):
        # Intercept the platform's configure_driver() and write_hw_rule() so
        # switched drivers are routed through this overlay; the originals are
        # kept under platform_* names for pass-through use.
        self.platform_configure_driver = self.platform.configure_driver
        self.platform.configure_driver = self.configure_driver
        self.platform_write_hw_rule = self.platform.write_hw_rule
        self.platform.write_hw_rule = self.write_hw_rule

    def initialize(self):
        """Automatically called by the Platform class after all the system
        modules are loaded.
        """
        self._validate_config()
        self.log.debug("Configuring Snux Diag LED for driver %s",
                       self.snux_config['diag_led_driver_number'])
        self.diag_led, _ = self.platform_configure_driver(
            {'number': self.snux_config['diag_led_driver_number'],
             'allow_enable': True})
        # Silence the diag LED's log methods; it pulses twice a second.
        self.diag_led.log.info = self.null_log_handler
        self.diag_led.log.debug = self.null_log_handler
        self.diag_led.enable()
        self.special_drivers.add(
            self.snux_config['diag_led_driver_number'].lower())
        self.log.debug("Configuring A/C Select Relay for driver %s",
                       self.system11_config['ac_relay_driver_number'])
        self.ac_relay, _ = self.platform_configure_driver(
            {'number': self.system11_config['ac_relay_driver_number'],
             'allow_enable': True})
        self.special_drivers.add(
            self.system11_config['ac_relay_driver_number'].lower())
        self.log.debug("Configuring A/C Select Relay transition delay for "
                       "%sms", self.system11_config['ac_relay_delay_ms'])
        self.ac_relay_delay_ms = self.system11_config['ac_relay_delay_ms']
        self.flipper_relay, _ = self.platform_configure_driver(
            {'number': self.snux_config['flipper_enable_driver_number'],
             'allow_enable': True})
        self.log.debug("Configuring Flipper Enable for driver %s",
                       self.snux_config['flipper_enable_driver_number'])
        self.machine.events.add_handler('init_phase_5',
                                        self._initialize_phase_2)

    def _initialize_phase_2(self):
        # Switch the diag LED from solid to flashing and start servicing the
        # side queues on every timer tick.
        self.machine.timing.add(
            Timer(callback=self.flash_diag_led, frequency=0.5))
        self.machine.events.add_handler('timer_tick', self._tick)

    def _validate_config(self):
        """Load and validate the 'system11' and 'snux' config sections."""
        self.system11_config = self.machine.config_processor.process_config2(
            'system11', self.machine.config['system11'])
        snux = self.machine.config.get('snux', dict())
        self.snux_config = self.machine.config_processor.process_config2(
            'snux', snux)

    def _tick(self):
        # Called based on the timer_tick event
        if self.a_side_queue:
            self._service_a_side()
        elif self.c_side_queue:
            self._service_c_side()
        elif self.c_side_enabled and not self.c_side_active:
            # Nothing pending and C side is idle: return to the A side.
            self._enable_a_side()

    def flash_diag_led(self):
        """Pulse the diagnostics LED (called twice per second once running)."""
        self.diag_led.pulse(250)

    def configure_driver(self, config, device_type='coil'):
        """Overlay for the platform's configure_driver().

        Driver numbers ending in 'a' or 'c' are wrapped in a SnuxDriver and
        registered to the matching relay side; all other drivers pass
        straight through to the wrapped platform.
        """
        # If the user has configured one of the special drivers in their
        # machine config, don't set it up since that could let them do weird
        # things.
        if config['number'].lower() in self.special_drivers:
            # NOTE(review): returns None here while the other paths return a
            # (driver, number) tuple -- confirm callers tolerate this.
            return

        orig_number = config['number']

        if (config['number'].lower().endswith('a') or
                config['number'].lower().endswith('c')):

            # Strip the side suffix; the physical driver number is shared.
            config['number'] = config['number'][:-1]

            platform_driver, _ = (
                self.platform_configure_driver(config, device_type))

            snux_driver = SnuxDriver(orig_number, platform_driver, self)

            if orig_number.lower().endswith('a'):
                self._add_a_driver(snux_driver.platform_driver)
            elif orig_number.lower().endswith('c'):
                self._add_c_driver(snux_driver.platform_driver)

            return snux_driver, orig_number

        else:
            return self.platform_configure_driver(config, device_type)

    def write_hw_rule(self, switch_obj, sw_activity, driver_obj, driver_action,
                      disable_on_release, drive_now,
                      **driver_settings_overrides):
        """On system 11 machines, Switched drivers cannot be configured with
        autofire hardware rules.
        """
        if driver_obj in self.a_drivers or driver_obj in self.c_drivers:
            self.log.warning("Received a request to set a hardware rule for a"
                             "switched driver. Ignoring")
        else:
            self.platform_write_hw_rule(switch_obj, sw_activity, driver_obj,
                                        driver_action, disable_on_release,
                                        drive_now,
                                        **driver_settings_overrides)

    def driver_action(self, driver, milliseconds):
        """Adds a driver action for a switched driver to the queue (for either
        the A-side or C-side queue).

        Args:
            driver: A reference to the original platform class Driver instance.
            milliseconds: Integer of the number of milliseconds this action is
                for. 0 = pulse, -1 = enable (hold), any other value is a timed
                action (either pulse or long_pulse)

        This action will be serviced immediately if it can, or ASAP otherwise.
        """
        if driver in self.a_drivers:
            self.a_side_queue.add((driver, milliseconds))
            self._service_a_side()
        elif driver in self.c_drivers:
            self.c_side_queue.add((driver, milliseconds))
            if not self.ac_relay_in_transition and not self.a_side_busy:
                self._service_c_side()

    def _enable_ac_relay(self):
        # Energize the relay (select the C side) and lock out both sides
        # until the mechanical transition delay has elapsed.
        self.ac_relay.enable()
        self.ac_relay_in_transition = True
        self.a_side_enabled = False
        self.c_side_enabled = False
        self.delay.add(ms=self.ac_relay_delay_ms,
                       callback=self._c_side_enabled,
                       name='enable_ac_relay')

    def _disable_ac_relay(self):
        # De-energize the relay (select the A side) with the same lockout.
        self.ac_relay.disable()
        self.ac_relay_in_transition = True
        self.a_side_enabled = False
        self.c_side_enabled = False
        self.delay.add(ms=self.ac_relay_delay_ms,
                       callback=self._a_side_enabled,
                       name='disable_ac_relay')

    # -------------------------------- A SIDE ---------------------------------

    def _enable_a_side(self):
        if not self.a_side_enabled and not self.ac_relay_in_transition:
            if self.c_side_active:
                # C side is still firing: kill its drivers, then retry after
                # the relay delay.
                self._disable_all_c_side_drivers()
                self.delay.add(ms=self.ac_relay_delay_ms,
                               callback=self._enable_a_side,
                               name='enable_a_side')
                return
            elif self.c_side_enabled:
                self._disable_ac_relay()
            else:
                self._a_side_enabled()

    def _a_side_enabled(self):
        # Relay transition finished: A side is live.
        self.ac_relay_in_transition = False
        self.a_side_enabled = True
        self.c_side_enabled = False
        self._service_a_side()

    def _service_a_side(self):
        if not self.a_side_queue:
            return
        elif not self.a_side_enabled:
            self._enable_a_side()
            return
        while self.a_side_queue:
            driver, ms = self.a_side_queue.pop()
            if ms > 0:
                # Timed pulse: track when the last one finishes so the relay
                # isn't flipped mid-pulse.
                driver.pulse(ms)
                self.a_side_done_time = max(self.a_side_done_time,
                                            time.time() + (ms / 1000.0))
            elif ms == -1:
                # Hold on until an explicit disable.
                driver.enable()
                self.drivers_holding_a_side.add(driver)
            else:  # ms == 0
                driver.disable()
                try:
                    self.drivers_holding_a_side.remove(driver)
                except KeyError:
                    pass

    def _add_a_driver(self, driver):
        self.a_drivers.add(driver)

    # -------------------------------- C SIDE ---------------------------------

    def _enable_c_side(self):
        if (not self.ac_relay_in_transition and
                not self.c_side_enabled and
                not self.a_side_busy):
            self._enable_ac_relay()
        elif self.c_side_enabled and self.c_side_queue:
            self._service_c_side()

    def _c_side_enabled(self):
        # Relay transition finished, but A-side requests always win: bounce
        # straight back if any arrived during the transition.
        self.ac_relay_in_transition = False
        if self.a_side_queue:
            self._enable_a_side()
            return
        self.a_side_enabled = False
        self.c_side_enabled = True
        self._service_c_side()

    def _service_c_side(self):
        if not self.c_side_queue:
            return
        if self.ac_relay_in_transition or self.a_side_busy:
            return
        elif not self.c_side_enabled:
            self._enable_c_side()
            return
        while self.c_side_queue:
            driver, ms = self.c_side_queue.pop()
            if ms > 0:
                driver.pulse(ms)
                self.c_side_done_time = max(self.c_side_done_time,
                                            time.time() + (ms / 1000.))
            elif ms == -1:
                driver.enable()
                self.drivers_holding_c_side.add(driver)
            else:  # ms == 0
                driver.disable()
                try:
                    self.drivers_holding_c_side.remove(driver)
                except KeyError:
                    pass

    def _add_c_driver(self, driver):
        self.c_drivers.add(driver)

    def _disable_all_c_side_drivers(self):
        # Emergency stop for the C side before flipping the relay back to A.
        if self.c_side_active:
            for driver in self.c_drivers:
                driver.disable()
            self.drivers_holding_c_side = set()
            self.c_side_done_time = 0
            self.c_side_enabled = False
class SnuxDriver(object):

    """Driver wrapper that routes pulse/enable/disable requests through the
    Snux overlay so A/C-switched coils are serviced on the correct relay
    side."""

    def __init__(self, number, platform_driver, overlay):
        self.number = number
        self.platform_driver = platform_driver
        self.driver_settings = platform_driver.driver_settings
        self.overlay = overlay

    def __repr__(self):
        return "SnuxDriver." + str(self.number)

    def pulse(self, milliseconds=None, **kwargs):
        if milliseconds is None:
            milliseconds = self.platform_driver.get_pulse_ms()
        self.overlay.driver_action(self.platform_driver, milliseconds)
        # Usually pulse() returns the time (in ms) the driver will pulse for,
        # but with A/C switched coils we don't know when the pulse will
        # actually fire, so -1 signals "completion time unknown".
        return -1

    def enable(self, **kwargs):
        # -1 is the overlay's sentinel for "hold this driver on".
        self.overlay.driver_action(self.platform_driver, -1)

    def disable(self, **kwargs):
        # 0 is the overlay's sentinel for "turn this driver off".
        self.overlay.driver_action(self.platform_driver, 0)
# Module-level hook: MPF's platform loader imports this name to find the
# overlay implementation provided by this module.
driver_overlay_class = Snux
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| spierepf/mpf | mpf/platform/snux.py | Python | mit | 14,196 | [
"Brian"
] | 4c65c5f8082fbaa3a7893b47d7f444e41b0d9a1cf9889d558de7c217f1df9831 |
###############################################################################
# Copyright 2017-2021 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "fstd2nc" package.
#
# "fstd2nc" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "fstd2nc" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "fstd2nc". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from fstd2nc.stdout import _, info, warn, error
from fstd2nc.mixins import BufferBase
# Helper function - apply metadata and renames to the variables.
def apply_metadata (b):
  """Apply user-supplied attribute overrides and variable renames to b.

  No-op when b carries no _metadata mapping. An empty-string attribute
  value means "remove this attribute".
  """
  if not hasattr(b, '_metadata'):
    return
  for obj in b._iter_objects():
    # Merge in any user-provided attributes for this variable.
    overrides = b._metadata.get(obj.name)
    if overrides is not None:
      for attname, attval in overrides.items():
        if attval == "":
          obj.atts.pop(attname, None)
        else:
          obj.atts[attname] = attval
    # Rename the variable if the user asked for it.
    if obj.name in b._renames:
      obj.name = b._renames[obj.name]
#################################################
# Mixin for adding netCDF metadata to the variables
class netCDF_Atts (BufferBase):
@classmethod
def _cmdline_args (cls, parser):
  """Register this mixin's metadata-related options on the argparse parser."""
  import argparse
  super(netCDF_Atts,cls)._cmdline_args(parser)
  # --metadata-file may be repeated; files are layered in order.
  parser.add_argument('--metadata-file', type=argparse.FileType('r'), action='append', help=_('Use metadata from the specified file. You can repeat this option multiple times to build metadata from different sources.'))
  parser.add_argument('--rename', metavar="OLDNAME=NEWNAME,...", help=_('Apply the specified name changes to the variables.'))
  parser.add_argument('--conventions', default='CF-1.6', help=_('Set the "Conventions" attribute for the netCDF file. Default is "%(default)s". Note that this has no effect on the structure of the file.'))
  parser.add_argument('--no-conventions', action='store_true', help=_('Omit the "Conventions" attribute from the netCDF file entirely. This can help for netCDF tools that have trouble recognizing the CF conventions encoded in the file.'))
@classmethod
def _check_args (cls, parser, args):
  """Validate parsed arguments; exits via parser.error() on a bad --rename."""
  super(netCDF_Atts,cls)._check_args(parser,args)
  if args.rename is not None:
    # Expect a comma-separated list of OLDNAME=NEWNAME pairs.
    try:
      dict(r.split('=') for r in args.rename.split(','))
    except ValueError:
      parser.error(_("Unable to parse the rename arguments."))
def __init__ (self, *args, **kwargs):
"""
metadata_file : str or list, optional
Use metadata from the specified file(s).
rename : str or dict, optional
Apply the specified name changes to the variables.
conventions : str, optional
Set the "Conventions" attribute for the netCDF file. Default is "CF-1.6".
Note that this has no effect on the structure of the file.
no_conventions : bool, optional
Omit the "Conventions" attribute from the netCDF file entirely.
This can help for netCDF tools that have trouble recognizing the CF
conventions encoded in the file.
"""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
from collections import OrderedDict
import numpy as np
metadata_file = kwargs.pop('metadata_file',None)
if metadata_file is None:
metafiles = []
elif isinstance(metadata_file,str):
metafiles = [metadata_file]
else:
metafiles = metadata_file
# Open the files if only the filename is provided.
metafiles = [open(m,'r') if isinstance(m,str) else m for m in metafiles]
metadata = OrderedDict()
# Set some global defaults.
# We need to explicitly state that we're using CF conventions in our
# output files, or some utilities (like IDV) won't accept the data.
conventions = kwargs.pop('conventions',"CF-1.6")
if not kwargs.pop('no_conventions',False):
metadata['global'] = OrderedDict(Conventions = conventions)
# Read the metadata files.
configparser = ConfigParser.SafeConfigParser()
configparser.optionxform = str # Make the attribute names case sensitive.
for metafile in metafiles:
configparser.readfp(metafile)
for varname in configparser.sections():
metadata.setdefault(varname,OrderedDict()).update(configparser.items(varname))
# Detect numerical values
for k,v in list(metadata[varname].items()):
# Skip things that should always be string values, even if they
# happen to contain numbers.
if k == 'units': continue
try:
metadata[varname][k] = float(v) # First, try converting to float.
metadata[varname][k] = np.int32(v) # Try further conversion to int.
except ValueError: pass
self._metadata = metadata
# Check for renames.
# Will override any renames specified in the metadata file.
rename = kwargs.pop('rename',None)
if rename is None:
rename = {}
if isinstance(rename,str):
rename = [r.split('=') for r in rename.split(',')]
rename = [(k.strip(),v.strip()) for k,v in rename]
rename = dict(rename)
for oldname, newname in rename.items():
self._metadata.setdefault(oldname,OrderedDict())['rename'] = newname
# Check for metadata entries under the new name.
# I.e., in conjunction with --rename used on command-line.
for oldname, newname in rename.items():
if newname in self._metadata:
self._metadata[oldname].update(self._metadata[newname])
super(netCDF_Atts,self).__init__(*args,**kwargs)
def _makevars (self):
super(netCDF_Atts,self)._makevars()
# Extract variable rename requests from the user-supplied metadata.
self._renames = {}
for varname, atts in self._metadata.items():
if 'rename' in atts:
self._renames[varname] = atts.pop('rename')
# Apply the user-supplied metadata.
apply_metadata (self)
#################################################
# Mixin for reading/writing FSTD data from/to netCDF files.
class netCDF_IO (BufferBase):
  # Mixin that handles the actual writing of the buffer contents to a
  # netCDF file, including time-axis encoding and unique-name generation.
  @classmethod
  def _cmdline_args (cls, parser):
    # Register the netCDF output options on the command-line parser.
    super(netCDF_IO,cls)._cmdline_args(parser)
    parser.add_argument('--time-units', choices=['seconds','minutes','hours','days'], default='hours', help=_('The units for the output time axis.  Default is %(default)s.'))
    parser.add_argument('--reference-date', metavar=_('YYYY-MM-DD'), help=_('The reference date for the output time axis.  The default is the starting date in the RPN file.'))
  @classmethod
  def _check_args (cls, parser, args):
    from datetime import datetime
    super(netCDF_IO,cls)._check_args(parser,args)
    # Parse the reference date into a datetime object.
    if args.reference_date is not None:
      try:
        datetime.strptime(args.reference_date,'%Y-%m-%d')
      except ValueError:
        # Note: fixed the duplicated word ("Unable to to parse") in this
        # user-facing error message.
        parser.error(_("Unable to parse the reference date '%s'.  Expected format is '%s'")%(args.reference_date,_('YYYY-MM-DD')))
  def __init__ (self, *args, **kwargs):
    """
    time_units : str, optional
        The units for the output time axis.  One of 'seconds', 'minutes',
        'hours' or 'days'.  Default is 'hours'.
    reference_date : str, optional
        The reference date ('YYYY-MM-DD') for the output time axis.
        The default is the starting date in the RPN file.
    unique_names : bool, optional
        Whether to rename variables / axes so that all output names are
        unique.  Default is True.
    """
    self._time_units = kwargs.pop('time_units','hours')
    self._reference_date = kwargs.pop('reference_date',None)
    self._unique_names = kwargs.pop('unique_names',True)
    super(netCDF_IO,self).__init__(*args,**kwargs)
  def _makevars (self):
    # Post-process the variable structures for netCDF output:
    # unique names, relative time axes, and netCDF-safe attributes.
    from fstd2nc.mixins import _var_type
    from datetime import datetime
    import numpy as np
    from netCDF4 import date2num
    super(netCDF_IO,self)._makevars()
    if self._reference_date is None:
      reference_date = None
    else:
      reference_date = datetime.strptime(self._reference_date,'%Y-%m-%d')
    # Check if time axis can be made an unlimited dimension.
    # Can only work if it only appears as the outermost dimension, otherwise
    # the netCDF4 module will crash (maybe a bug with netCDF4?)
    self._time_unlimited = True
    for var in self._varlist:
      if 'time' not in var.dims: continue
      if var.dims.index('time') > 0:
        self._time_unlimited = False
    # Generate unique names for output data.
    if self._unique_names:
      self._fix_names()
      # Hack: re-apply metadata for the final (renamed) variables.
      apply_metadata(self)
    for var in self._iter_objects():
      # Modify time axes to be relative units instead of datetime objects.
      # Also attach relevant metadata.
      if hasattr(var,'array') and isinstance(var.array.reshape(-1)[0],np.datetime64):
        # Convert from np.datetime64 to datetime.datetime
        # .tolist() only returns a datetime object for datetime64[s], not
        # for datetime64[ns].
        var.array = np.asarray(var.array, dtype='datetime64[s]')
        # https://stackoverflow.com/a/13703930/9947646
        var.array = np.array(var.array.tolist())
        units = '%s since %s'%(self._time_units, reference_date or var.array.reshape(-1)[0])
        var.atts.update(units=units, calendar='gregorian')
        var.array = np.asarray(date2num(var.array,units=units), dtype='double')
    for obj in self._iter_objects():
      # Encode the attributes so they're ready for writing to netCDF.
      # Handles things like encoding coordinate objects to a string.
      if hasattr(obj,'atts'):
        self._encode_atts(obj)
  # Helper method - prepare attributes for writing to netCDF.
  @staticmethod
  def _encode_atts (obj):
    from collections import OrderedDict
    atts = obj.atts
    for attname, attval in list(atts.items()):
      # Detect variable reference, convert to string.
      if hasattr(attval,'name'):
        # Store the dependency for future use.
        obj.deps.append(attval)
        atts[attname] = attval.name
      # Detect list of objects, convert to space-separated string.
      elif isinstance(attval,list):
        if any(hasattr(v,'name') for v in attval):
          # Store the dependencies for future use.
          obj.deps.extend(attval)
          # Convert attribute to string.
          atts[attname] = ' '.join(v.name for v in attval)
        # Remove the attribute if the list of objects is empty
        elif len(attval) == 0:
          atts.pop(attname)
      # Detect dictionaries, convert to "key1: value1 key2: value2"
      elif isinstance(attval,OrderedDict):
        if len(attval) > 0:
          # Store the dependencies for future use.
          obj.deps.extend([k for k in attval.keys() if hasattr(k,'name')])
          obj.deps.extend([v for v in attval.values() if hasattr(v,'name')])
          # Convert attribute to string.
          attval = [getattr(k,'name',k)+': '+getattr(v,'name',v) for k,v in attval.items()]
          atts[attname] = ' '.join(attval)
        # Remove the attribute if the list of objects is empty
        else:
          atts.pop(attname)
  def _fix_names (self):
    # Make all axis and variable names unique, and netCDF-safe.
    # List of metadata keys that are internal to the FSTD file.
    internal_meta = self._headers.keys()
    # Generate unique axis names.
    axis_table = dict()
    for axis in self._iter_axes():
      if axis.name not in axis_table:
        axis_table[axis.name] = []
      axis_table[axis.name].append(axis)
    for axisname, axis_list in axis_table.items():
      if len(axis_list) == 1: continue
      warn (_("Multiple %s axes.  Appending integer suffixes to their names.")%axisname)
      for i,axis in enumerate(axis_list):
        axis.name = axis.name+str(i+1)
    # Generate a string-based variable id.
    # Only works for true variables from the FSTD source
    # (needs metadata like etiket, etc.)
    def get_var_id (var):
      out = []
      for fmt in self._human_var_id:
        try:
          out.append(fmt%var.atts)
        except KeyError:
          out.append(None)
      # Check if no useful information found to construct any part of the id.
      if list(set(out)) == [None]: raise KeyError
      return tuple(out)
    # Generate unique variable names.
    var_table = dict()
    for var in self._iter_objects():
      if var.name not in var_table:
        var_table[var.name] = []
      # Identify the variables by their index in the master list.
      var_table[var.name].append(var)
    for varname, var_list in var_table.items():
      # Only need to rename variables that are non-unique.
      if len(var_list) == 1: continue
      try:
        var_ids = [get_var_id(v) for v in var_list]
      except KeyError:
        # Some derived axes may not have enough metadata to generate an id,
        # so the best we can do is append an integer suffix.
        var_ids = [(str(r),) for r in range(1,len(var_list)+1)]
      var_ids = zip(*var_ids)
      # Omit parts of the var_id that are invariant over all the variables.
      var_ids = [var_id for var_id in var_ids if len(set(var_id)) > 1]
      # Starting from the rightmost key, remove as many keys as possible while
      # maintaining uniqueness.
      for j in reversed(range(len(var_ids))):
        test = var_ids[:j] + var_ids[j+1:]
        # If variables are still unique without this part of the id, then it
        # can be safely removed.
        # Also, can remove this part if it's not consistently defined.
        # (E.g. if one of the versions of the variable does not have a
        # consistent version of an attribute to use).
        if len(set(zip(*test))) == len(var_list) or None in var_ids[j]:
          var_ids = test
      var_ids = zip(*var_ids)
      var_ids = ['_'.join(var_id) for var_id in var_ids]
      warn (_("Multiple definitions of %s.  Adding unique suffixes %s.")%(varname, ', '.join(var_ids)))
      # Apply the name changes.
      for var, var_id in zip(var_list, var_ids):
        var.name = var.name + '_' + var_id
    for var in self._iter_objects():
      # Names must start with a letter or underscore.
      if not var.name[0].isalpha() and var.name[0] != '_':
        warn(_("Renaming '%s' to '_%s'.")%(var.name,var.name))
        var.name = '_'+var.name
      # Strip out FSTD-specific metadata?
      if not hasattr(var,'atts'): continue
      if self._rpnstd_metadata_list is not None:
        for n in internal_meta:
          if n not in self._rpnstd_metadata_list:
            var.atts.pop(n,None)
  def to_netcdf (self, filename, nc_format='NETCDF4', global_metadata=None, zlib=False, compression=4, progress=False):
    """
    Write the records to a netCDF file.
    Requires the netCDF4 package.

    Parameters
    ----------
    filename : str
        Name of the netCDF file to create.
    nc_format : str, optional
        The netCDF format to use (e.g. 'NETCDF4', 'NETCDF3_CLASSIC').
        Default is 'NETCDF4'.
    global_metadata : dict, optional
        Extra global attributes to store in the file.
    zlib : bool, optional
        Whether to compress the variables.  Default is False.
    compression : int, optional
        Compression level used when zlib is enabled.  Default is 4.
    progress : bool, optional
        Whether to display a progress bar while writing.  Default is False.
    """
    from fstd2nc.mixins import _var_type, _ProgressBar, _FakeBar
    from netCDF4 import Dataset
    import numpy as np
    f = Dataset(filename, "w", format=nc_format)
    # Apply global metadata (from config files and global_metadata argument).
    if 'global' in getattr(self,'_metadata',{}):
      f.setncatts(self._metadata['global'])
    if global_metadata is not None:
      f.setncatts(global_metadata)
    # Collect all the records that will be read/written.
    # List of (key,recshape,ncvar,ncind).
    # Note: derived variables (with values stored in memory) will be written
    # immediately, bypassing this list.
    io = []
    self._makevars()
    # Define the dimensions.
    for axis in self._iter_axes():
      # Special case: make the time dimension unlimited.
      if axis.name == 'time' and self._time_unlimited:
        f.createDimension(axis.name, None)
      else:
        f.createDimension(axis.name, len(axis))
    # Generate the variable structures.
    for var in self._iter_objects():
      # Write the variable.
      # Easy case: already have the data.
      if hasattr(var,'array'):
        v = f.createVariable(var.name, datatype=var.array.dtype, dimensions=var.dims, zlib=zlib, complevel=compression)
        # Write the metadata.
        v.setncatts(var.atts)
        v[()] = var.array
        continue
      # Hard case: only have the record indices, need to loop over the records.
      # Get the shape of a single record for the variable.
      if hasattr(var,'record_id'):
        record_shape = var.shape[var.record_id.ndim:]
      elif hasattr(var,'chunksize'):
        record_shape = var.chunksize
      else:
        continue
      # Use this as the "chunk size" for the netCDF file, to improve I/O
      # performance.
      chunksizes = (1,)*(len(var.axes)-len(record_shape)) + record_shape
      if hasattr(self,'_fill_value') and var.dtype.name.startswith('float32'):
        fill_value = self._fill_value
      else:
        fill_value = None
      # netCDF3 can't handle unsigned ints, so cast to signed.
      dtype = var.dtype
      if dtype.name.startswith('uint') and nc_format.startswith('NETCDF3'):
        warn (_("netCDF3 does not support unsigned ints.  Converting %s to signed int.")%var.name)
        dtype = np.dtype(dtype.name[1:])
      v = f.createVariable(var.name, datatype=dtype, dimensions=var.dims, zlib=zlib, complevel=compression, chunksizes=chunksizes, fill_value=fill_value)
      # Turn off auto scaling of variables - want to encode the values as-is.
      # 'scale_factor' and 'add_offset' will only be applied when *reading* the
      # the file after it's created.
      v.set_auto_scale(False)
      # Write the metadata.
      v.setncatts(var.atts)
      # Write the data.
      if hasattr(var,'record_id'):
        indices = list(np.ndindex(var.record_id.shape))
        keys = map(int,var.record_id.flatten())
      else:
        indices = list(var.keys())
        keys = list(var.chunks.values())
        record_shape = None # Reshaping with chunked data not supported.
      for r, ind in zip(keys,indices):
        if r >= 0:
          io.append((r,record_shape,v,ind))
    # Check if no data records exist and no coordinates were converted.
    if len(io) == 0 and len(f.variables) == 0:
      warn(_("No relevant FST records were found."))
    # Now, do the actual transcribing of the data.
    # Read/write the data in the same order of records in the RPN file(s) to
    # improve performance.
    Bar = _ProgressBar if (progress is True and len(io) > 0) else _FakeBar
    bar = Bar(_("Saving netCDF file"), suffix="%(percent)d%% [%(myeta)s]")
    for r,shape,v,ind in bar.iter(sorted(io)):
      try:
        data = self._fstluk(r,dtype=v.dtype)['d'].transpose().reshape(shape)
        v[ind] = data
      except (IndexError,ValueError):
        warn(_("Internal problem with the script - unable to get data for '%s'")%v.name)
        continue
    f.close()
  # Alias "to_netcdf" as "write_nc_file" for backwards compatibility.
  write_nc_file = to_netcdf
| neishm/fstd2nc | fstd2nc/mixins/netcdf.py | Python | lgpl-3.0 | 18,945 | [
"NetCDF"
] | 44129eddd74e4aa92c2f813d63c7c82fd3188e06d5b4cc3a4826ed176f2b643e |
"""
There are two kinds of Repositories in PyChemia
Structure Repositories where many structures are stored
Execution Repositories where the out of every calculation
is stored
Each structure contains some metadata that is accessible with
the StructureEntry object
Also each calculation has it own metadata accessible by ExecutionEntry
object
"""
import hashlib
import json as _json
import os
import uuid as _uuid
import shutil as _shutil
import math
from pychemia.core.structure import load_structure_json
from pychemia.utils.computing import deep_unicode
class StructureEntry:
    """
    Defines one entry in the repository of Structures
    """

    def __init__(self, structure=None, repository=None, identifier=None, original_file=None, tags=None):
        """
        Creates a new Entry for Structures

        If identifier is provided the corresponding Structure is loaded in the Entry.
        Otherwise a new entry is created with a random UUID identifier.

        Args:
            structure: (object) Structure stored in the entry (only for new entries)
            identifier: (string) UUID identifier for a structure
            repository: (object) The StructureRepository that will be associated
            original_file: (string) Path to the original file (CIF, POSCAR, etc)
            tags: (string or list) Tags that will be associated to that structure
        """
        self.properties = None
        # Store the repository in both branches; previously it was only set
        # when loading an existing entry, which broke save() for new entries.
        self.repository = repository
        if identifier is None:
            # New entry: generate a random UUID and tag by number of species.
            self.structure = structure
            self.identifier = str(_uuid.uuid4())
            self.path = None
            if original_file is not None:
                assert (os.path.isfile(original_file))
            self.original_file = original_file
            self.parents = []
            self.children = []
            if isinstance(tags, str):
                self.tags = [tags]
            elif isinstance(tags, list):
                self.tags = tags
            elif tags is None:
                self.tags = []
            else:
                raise ValueError('The variable tags must be a string or list of strings')
            if len(self.structure.composition) == 1:
                self.add_tags('pure')
            elif len(self.structure.composition) == 2:
                self.add_tags('binary')
            elif len(self.structure.composition) == 3:
                self.add_tags('ternary')
            elif len(self.structure.composition) == 4:
                self.add_tags('quaternary')
        else:
            # Existing entry: everything is read from the repository directory.
            assert (original_file is None)
            assert (structure is None)
            assert (tags is None)
            assert (repository is not None)
            self.identifier = identifier
            self.path = self.repository.path + '/' + self.identifier
            if not os.path.isdir(self.path):
                raise ValueError("Directory not found: " + self.path)
            if not os.path.isfile(self.path + '/metadata.json'):
                raise ValueError("No metadata found in " + self.path)
            if not os.path.isfile(self.path + '/structure.json'):
                raise ValueError("No structure found in " + self.path)
            self.load()

    def metadatatodict(self):
        """Serialize the entry metadata (tags, parents, children) to a dict."""
        ret = {'tags': self.tags,
               'parents': self.parents,
               'children': self.children}
        return ret

    def load(self):
        """Load metadata, structure, properties and original files from disk."""
        assert isinstance(self.identifier, str)
        rf = open(self.path + '/metadata.json', 'r')
        self.metadatafromdict(deep_unicode(_json.load(rf)))
        rf.close()
        # Normalize missing metadata fields to empty lists.
        if self.tags is None:
            self.tags = []
        if self.children is None:
            self.children = []
        if self.parents is None:
            self.parents = []
        self.structure = load_structure_json(self.path + '/structure.json')
        if os.path.isfile(self.path + '/properties.json'):
            rf = open(self.path + '/properties.json', 'r')
            try:
                self.properties = deep_unicode(_json.load(rf))
            except ValueError:
                # Corrupted JSON: move the file aside instead of failing.
                os.rename(self.path + '/properties.json', self.path + '/properties.json.FAILED')
                self.properties = None
            rf.close()
        self.load_originals()

    def load_originals(self):
        """Refresh self.original_file with the files under <path>/original."""
        orig_dir = self.path + '/original'
        if os.path.isdir(orig_dir):
            self.original_file = [os.path.abspath(orig_dir + '/' + x) for x in os.listdir(orig_dir)]
        else:
            self.original_file = []

    def save(self):
        """Write metadata, structure, properties and originals to disk."""
        if self.path is None:
            self.path = self.repository.path + '/' + self.identifier
        wf = open(self.path + '/metadata.json', 'w')
        _json.dump(self.metadatatodict(), wf, sort_keys=True, indent=4, separators=(',', ': '))
        wf.close()
        self.structure.save_json(self.path + '/structure.json')
        if self.properties is not None:
            wf = open(self.path + '/properties.json', 'w')
            _json.dump(self.properties, wf, sort_keys=True, indent=4, separators=(',', ': '))
            wf.close()
        if self.original_file is not None:
            self.add_original_file(self.original_file)

    def metadatafromdict(self, entrydict):
        """Restore tags, parents and children from a metadata dictionary."""
        self.tags = entrydict['tags']
        self.parents = entrydict['parents']
        self.children = entrydict['children']

    def add_tags(self, tags):
        _add2list(tags, self.tags)

    def add_parents(self, parents):
        _add2list(parents, self.parents)

    def add_children(self, children):
        _add2list(children, self.children)

    def add_original_file(self, filep):
        """Attach one or several original files (CIF, POSCAR, ...) to the entry.

        Files whose content (SHA-224 digest) is already stored are skipped;
        filename collisions with different content get an integer suffix.
        """
        orig_dir = self.path + '/original'
        if isinstance(filep, str):
            filep = [filep]
        self.load_originals()
        hashs = {}
        for iorig in self.original_file:
            # Binary mode: hashlib requires bytes on Python 3.
            rf = open(iorig, 'rb')
            hashs[iorig] = hashlib.sha224(rf.read()).hexdigest()
            rf.close()
        for ifile in filep:
            assert (os.path.isfile(ifile))
            rf = open(ifile, 'rb')
            hash_ifile = hashlib.sha224(rf.read()).hexdigest()
            rf.close()
            if hash_ifile in hashs.values():
                # Identical content is already stored; nothing to do.
                continue
            if ifile not in self.original_file:
                if not os.path.isdir(orig_dir):
                    os.mkdir(orig_dir)
                if not os.path.isfile(orig_dir + '/' + os.path.basename(ifile)):
                    _shutil.copy2(ifile, orig_dir)
                else:
                    # Same filename, different content: find a free suffix.
                    i = 0
                    while True:
                        newfile = ifile + '_' + str(i)
                        if not os.path.isfile(orig_dir + '/' + os.path.basename(newfile)):
                            _shutil.copy(ifile, orig_dir + '/' + os.path.basename(newfile))
                            break
                        else:
                            i += 1
        self.load_originals()

    def __str__(self):
        ret = 'Structure: \n' + str(self.structure)
        ret += '\nTags: ' + str(self.tags)
        ret += '\nParents: ' + str(self.parents)
        ret += '\nChildren: ' + str(self.children)
        ret += '\nIdentifier: ' + str(self.identifier)
        ret += '\nOriginal Files:' + str(self.original_file)
        ret += '\n'
        return ret

    def __eq__(self, other):
        # Entries are equal when structure, children, parents and tags match
        # (order of the list fields is ignored).
        ret = True
        if self.structure != other.structure:
            print('Not equal structure')
            ret = False
        elif self.children is None and other.children is not None:
            ret = False
        elif self.children is not None and other.children is None:
            ret = False
        elif self.children is not None and set(self.children) != set(other.children):
            print('Not equal children')
            ret = False
        elif self.parents is None and other.parents is not None:
            ret = False
        elif self.parents is not None and other.parents is None:
            ret = False
        elif self.parents is not None and set(self.parents) != set(other.parents):
            print('Not equal parents')
            ret = False
        elif self.tags is None and other.tags is not None:
            ret = False
        elif self.tags is not None and other.tags is None:
            ret = False
        elif self.tags is not None and set(self.tags) != set(other.tags):
            print('Not equal tags')
            ret = False
        return ret

    def __ne__(self, other):
        return not self.__eq__(other)
class PropertiesEntry:
    """
    Defines one calc in the Execution Repository
    """

    def __init__(self, structure_entry):
        """
        Creates a new calc repository associated with *structure_entry*.
        """
        self.entry = structure_entry
        self.properties = {}

    def add_property(self, name, values):
        """Store *values* under the property *name*."""
        self.properties[name] = values

    def save(self):
        """
        Save an existing repository information
        """
        wf = open(self.entry.path + '/properties.json', 'w')
        _json.dump(self.properties, wf, sort_keys=True, indent=4, separators=(',', ': '))
        wf.close()

    def load(self):
        """
        Loads an existing db from its configuration file
        """
        # Bug fix: this class has no 'path' attribute; the properties file
        # lives under the associated structure entry's path.
        rf = open(self.entry.path + '/properties.json', 'r')
        self.properties = deep_unicode(_json.load(rf))
        rf.close()
class StructureRepository:
    """
    Defines the location of the executions repository
    and structure repository and methods to add, remove
    and check those db
    """

    def __init__(self, path):
        """
        Creates new db for calculations and structures

        Args:
            path: (string) Directory path for the structure repository
        """
        self.path = os.path.abspath(path)
        if os.path.isfile(self.path + '/db.json'):
            self.load()
        else:
            # Fresh repository: create the directory (if needed) and an
            # empty db.json.
            self.tags = {}
            if os.path.lexists(self.path):
                if not os.path.isdir(self.path):
                    raise ValueError('Path exists already and it is not a directory')
            else:
                os.mkdir(self.path)
            self.save()

    def todict(self):
        """
        Serialize the values of the db into a dictionary
        """
        repos_dict = {'tags': self.tags}
        return repos_dict

    def fromdict(self, repos_dict):
        self.tags = repos_dict['tags']

    def save(self):
        """
        Save an existing repository information
        """
        wf = open(self.path + '/db.json', 'w')
        _json.dump(self.todict(), wf, sort_keys=True, indent=4, separators=(',', ': '))
        wf.close()

    def load(self):
        """
        Loads an existing db from its configuration file
        """
        rf = open(self.path + '/db.json', 'r')
        try:
            jsonload = deep_unicode(_json.load(rf))
        except ValueError:
            # Corrupted db.json: start over with an empty tag index.
            print("Error deserializing the object")
            jsonload = {'tags': {}}
        self.fromdict(jsonload)
        rf.close()

    def rebuild(self):
        """Rebuild the tag index by re-reading every entry on disk."""
        ids = self.get_all_entries
        self.tags = {}
        for ident in ids:
            struct_entry = StructureEntry(identifier=ident, repository=self)
            for i in struct_entry.tags:
                if i in self.tags:
                    self.tags[i].append(ident)
                else:
                    self.tags[i] = [ident]
        self.save()

    @property
    def get_all_entries(self):
        """List of identifiers of every entry present on disk."""
        return [x for x in os.listdir(self.path) if os.path.isfile(self.path + '/' + x + '/metadata.json')]

    def __len__(self):
        return len(self.get_all_entries)

    def get_formulas(self):
        """Return a dict mapping each formula to the list of entry ids with it."""
        formulas = {}
        for i in self.get_all_entries:
            ientry = StructureEntry(repository=self, identifier=i)
            formula = ientry.structure.formula
            if formula in formulas:
                formulas[formula].append(i)
            else:
                formulas[formula] = [i]
        return formulas

    def merge2entries(self, orig, dest):
        """Merge the metadata of *orig* into *dest* and delete *orig*.

        Both entries must contain the same structure.
        """
        assert (orig.structure == dest.structure)
        dest.add_parents(orig.parents)
        dest.add_children(orig.children)
        dest.add_tags(orig.tags)
        if orig.original_file is not None and len(orig.original_file) > 0:
            dest.add_original_file(orig.original_file)
        dest.save()
        self.del_entry(orig)

    def clean(self):
        """Drop tag references to entries that no longer exist on disk."""
        for i in self.tags:
            # Iterate over a copy: removing from the list being iterated
            # would silently skip the following element.
            for j in list(self.tags[i]):
                if not os.path.isdir(self.path + '/' + j) or not os.path.isfile(self.path + '/' + j + '/metadata.json'):
                    print('Removing', j)
                    self.tags[i].remove(j)
        self.save()

    def refine(self):
        """Merge duplicated entries (same formula, equal metadata).

        Note: only consecutive entries in each formula group are compared.
        """
        formulas = self.get_formulas()
        for j in formulas:
            print(j)
            if len(formulas[j]) > 1:
                for i in range(len(formulas[j]) - 1):
                    stru1 = StructureEntry(repository=self, identifier=formulas[j][i])
                    stru2 = StructureEntry(repository=self, identifier=formulas[j][i + 1])
                    if stru1 == stru2:
                        self.merge2entries(stru1, stru2)
        self.save()

    def merge(self, other):
        """
        Add all the contents from other db into the
        calling object

        :param other: StructureRepository
        """
        # Bug fix: the original called a nonexistent 'get_all_enties'
        # attribute, which raised AttributeError on every merge.
        conflict_entries = []
        for i in other.get_all_entries:
            if i in self.get_all_entries:
                other_structure = StructureEntry(repository=other, identifier=i)
                this_structure = StructureEntry(repository=self, identifier=i)
                if this_structure != other_structure:
                    conflict_entries.append(i)
        if len(conflict_entries) == 0:
            for i in other.get_all_entries:
                if i not in self.get_all_entries:
                    _shutil.copytree(other.path + '/' + i, self.path + '/' + i)
        else:
            print('Conflict entries found, No merge done')
        return conflict_entries

    def add_entry(self, entry):
        """
        Add a new StructureEntry into the repository
        """
        entry.repository = self
        entry.path = self.path + '/' + entry.identifier
        if not os.path.isdir(entry.path):
            os.mkdir(entry.path)
        entry.save()
        # Keep the tag index in sync with the new entry.
        if entry.tags is not None:
            for itag in entry.tags:
                if itag in self.tags:
                    if entry.identifier not in self.tags[itag]:
                        self.tags[itag].append(entry.identifier)
                else:
                    self.tags[itag] = [entry.identifier]
        self.save()

    def add_many_entries(self, list_of_entries, tag, number_threads=1):
        """Parse a list of CIF files and add them, split across threads.

        Returns (threads, result_list); the threads are started but not
        joined, so callers must join them before reading the results.
        """
        from threading import Thread
        from pychemia.external.pymatgen import cif2structure

        def worker(cifs, tags, results):
            # Each thread records which files succeeded or failed to parse.
            results['succeed'] = []
            results['failed'] = []
            for icif in cifs:
                try:
                    struct = cif2structure(icif, primitive=True)
                except ValueError:
                    struct = None
                    results['failed'].append(icif)
                if struct is not None:
                    structentry = StructureEntry(structure=struct, original_file=icif, tags=[tags])
                    self.add_entry(structentry)
                    results['succeed'].append(icif)

        th = []
        result_list = []
        num = int(math.ceil(float(len(list_of_entries)) / number_threads))
        for i in range(number_threads):
            result_list.append({})
            th.append(Thread(target=worker,
                             args=(
                                 list_of_entries[i * num:min((i + 1) * num, len(list_of_entries))], tag,
                                 result_list[i])))
        for i in th:
            i.start()
        return th, result_list

    def del_entry(self, entry):
        """Remove *entry* from the tag index and delete its directory."""
        print('Deleting ', entry.identifier)
        for i in entry.tags:
            self.tags[i].remove(entry.identifier)
        _shutil.rmtree(entry.path)

    def __str__(self):
        ret = 'Location: ' + self.path
        ret += '\nNumber of entries: ' + str(len(self))
        if len(self.tags) > 0:
            for itag in self.tags:
                ret += '\n\t' + itag + ':'
                ret += '\n' + str(self.tags[itag])
        else:
            ret += '\nTags: ' + str(self.tags)
        return ret

    def structure_entry(self, ident):
        """Convenience constructor for a StructureEntry of this repository."""
        return StructureEntry(repository=self, identifier=ident)
class ExecutionRepository:
    """Repository for the results of executions (calculations).

    Placeholder: it defines the location and properties of the repository
    where all the executions will be stored, but holds no state yet.
    """

    def __init__(self):
        """Create an (empty) repository for executions."""
        pass
def _add2list(orig, dest):
if isinstance(orig, str):
if orig not in dest:
dest.append(orig)
elif isinstance(orig, list):
for iorig in dest:
if iorig not in dest:
dest.append(iorig)
| MaterialsDiscovery/PyChemia | pychemia/db/repo.py | Python | mit | 16,947 | [
"pymatgen"
] | eb549a37fb5921231e8655e063ab0d05a7c2a27302c9a03db052f12e79805fb0 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkIVWriter(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkIVWriter."""

    def __init__(self, module_manager):
        # Delegate all the wiring to the generic wrapper: one input port
        # named 'vtkIV', no output ports, and the wrapped object's docs
        # replacing the module documentation.
        SimpleVTKClassModuleBase.__init__(
            self,
            module_manager,
            vtk.vtkIVWriter(),
            'Writing vtkIV.',
            ('vtkIV',),
            (),
            inputFunctions=None,
            outputFunctions=None,
            replaceDoc=True)
| nagyistoce/devide | modules/vtk_basic/vtkIVWriter.py | Python | bsd-3-clause | 460 | [
"VTK"
] | d3a0abf956db319dbb422dc6294cc0215d54b786b16b70cda735ea8b4ced13ab |
import random
from main import Game
import game
import objects
import libtcodpy as libtcod
import items
from main import game_instance
class Monster(object):
    """Base class for monster AI components.

    Subclasses override init() and take_turn(); load_data() copies arbitrary
    configuration key/value pairs onto the instance and returns it so calls
    can be chained.
    """

    def init(self, *a):
        """Post-attachment hook; the default implementation does nothing."""
        pass

    def take_turn(self):
        """Act for one game turn; the default implementation does nothing."""
        pass

    def load_data(self, data):
        """Set every key of *data* as an attribute on this instance."""
        for key in data:
            setattr(self, key, data[key])
        return self
class BasicMonster(Monster):
    """Default melee AI: chase the player while seen, attack when adjacent."""

    def take_turn(self):
        """Step toward the player if visible; attack when in melee range."""
        monster = self.owner
        if game_instance.player.can_see(monster.x, monster.y):
            if monster.distance_to(game_instance.player) > 1:
                dx,dy = monster.move_towards(game_instance.player.x, game_instance.player.y)
                counter = 0
                # If the direct step produced no movement, try up to 10 random
                # diagonal steps to get unstuck.
                # random.randrange(-1,2,2) yields either -1 or 1.
                while (dx,dy) == (0,0) and counter < 10: # wiggle around if stuck
                    counter += 1
                    dx,dy = monster.move(random.randrange(-1,2,2), random.randrange(-1,2,2))
                    #print 'wiggled %s times' % counter
            elif game_instance.player.fighter.hp > 0:
                # Adjacent and the player is still alive: attack.
                monster.fighter.attack(game_instance.player)
class Thief(BasicMonster):
    """Melee monster that tries to steal items from the player when adjacent.

    The 'skill' attribute (a percentage) is expected to be set via
    Monster.load_data before init() runs.
    """

    def init(self, level):
        self.level = level
        self.player = level.player
        self.inventory = []
        self.skill = self.skill / 100.0  # percentage -> probability

    def take_turn(self):
        # 70% chance to attempt a theft when adjacent to the player,
        # otherwise behave like a basic melee monster.
        if self.player.distance(self.owner.x, self.owner.y) < 2 and random.random() < .7:
            self.steal()
        else:
            BasicMonster.take_turn(self)

    def steal(self):
        # Bug fix: the original showed the "can't find anything" message when
        # the inventory was NON-empty and then attempted the theft
        # unconditionally, crashing on an empty inventory.
        if not self.player.inventory:
            game_instance.message( ('%s can\'t find anything to steal'%self.owner.name).capitalize(), libtcod.orange )
            return
        obj = random.choice(self.player.inventory.keys())
        game_instance.message( ('%s tries to steal %s'%(self.owner.name,obj)).capitalize(), libtcod.red)
        if random.random() < self.skill:
            game_instance.message( ('%s successfully steals %s'%(self.owner.name,obj)).capitalize(), libtcod.orange)
            obj = self.player.inventory[obj]
            self.inventory.append(obj)
            del self.player.inventory[obj.name]

    def death(self):
        # Die like a normal monster, then drop everything that was stolen.
        monster_death(self.owner)
        for item in self.inventory:
            self.drop(item)

    def drop(self, item):
        """Place *item* on the thief's tile and remove it from its loot."""
        item.x, item.y = self.owner.pos
        self.level.add_object(item)
        self.inventory.remove(item)
class DjikstraMonster(Monster):
    """AI that follows a Dijkstra map toward the player when out of sight."""

    # Class-level cache; currently never read or written by this class.
    maps = {}

    def init(self, level):
        self.level = level
        self.owner.always_visible = True
        self.opos = self.owner.x, self.owner.y  # spawn position
        self.ppos = None
        # NOTE(review): this local is unused -- presumably left over from
        # earlier map-caching code; confirm before removing.
        map = level.map

    def take_turn(self):
        pos = self.owner.x, self.owner.y
        dx,dy = 0,0
        # NOTE(review): player_room is computed but never used below.
        player_room = self.level.player.get_room()
        if self.level.is_visible(*pos):
            # In line of sight: attack if adjacent, otherwise step directly
            # toward the player.
            if self.level.player.distance(*pos) < 2:
                self.owner.fighter.attack(game_instance.player)
            else:
                dx, dy = self.owner.get_step_towards(*self.level.player.pos)
        else:
            # Out of sight: walk one step of the level's Dijkstra map
            # toward the player's position.
            dj = self.level.get_djikstra(*self.owner.pos)
            path = libtcod.dijkstra_path_set(dj, *self.level.player.pos)
            x,y = libtcod.dijkstra_path_walk(dj)
            if x is not None:
                dx = x - self.owner.x
                dy = y - self.owner.y
            else:
                # Debug marker: no step could be taken on the path.
                print '!'
        self.owner.move(dx,dy)
class AdvancedMonster(Monster):
    """Melee AI that routes around blocked tiles using a 3x3 neighborhood."""

    def perimeter(self, rect):
        """Yield (dx, dy, cell) for the 8 border cells of a 3x3 grid,
        with offsets in -1..1 (the center cell (0,0) is skipped)."""
        for dx,row in enumerate(rect, -1):
            for dy, cell in enumerate(row, -1):
                if (dx in {-1,1}) or (dy in {-1,1}):
                    yield dx,dy, cell

    def take_turn(self):
        monster = self.owner
        if not game_instance.player.can_see(monster.x, monster.y):
            return
        elif monster.distance_to(game_instance.player) > 1:
            x,y = monster.x, monster.y
            player_x, player_y = game_instance.player.pos
            # Build a 3x3 occupancy grid of blocked tiles around the monster.
            neighborhood = [ [0,0,0], [0,0,0], [0,0,0] ]
            for dx in range(-1,2):
                for dy in range(-1,2):
                    new_x = x+dx
                    new_y = y+dy
                    neighborhood[dx+1][dy+1] += int(monster.level.is_blocked(x+dx, y+dy))
            dx, dy = monster.get_step_towards(player_x, player_y)
            if neighborhood[dx+1][dy+1]:
                # Preferred step is blocked: pick among the open border cells.
                open = []
                # NOTE(review): this loop reuses dx,dy as its loop variables,
                # so the sort key below compares against the LAST perimeter
                # offset rather than the blocked step computed above --
                # looks like a shadowing bug; confirm intended behavior.
                for dx,dy, cell in self.perimeter(neighborhood):
                    if not cell:
                        open.append( (dx,dy) )
                # Python 2 tuple-parameter lambda; keep the 3 closest options
                # and choose one at random.
                open = sorted(open, key=lambda (a,b): abs(a-dx)+abs(b-dy))[:3]
                dx,dy = random.choice(open)
            monster.move(dx,dy)
        else:
            monster.fighter.attack(game_instance.player)
class ConfusedMonster(Monster):
    """Temporary AI that replaces a monster's AI while it is confused.

    While confused, the monster moves toward (and may trade blows with) the
    closest other monster; after num_turns expire the previous AI is restored.
    """

    def __init__(self, num_turns=game_instance.CONFUSE_NUM_TURNS):
        # The default duration is read from game_instance once, when this
        # method is defined.
        self.num_turns = num_turns

    def attach(self, object):
        """Swap this AI in for *object*'s current AI, remembering the old one."""
        self.old_ai = object.ai
        self.owner = object
        object.ai = self

    def take_turn(self):
        if self.num_turns > 0:
            game.message('%s is confused' % self.owner.name)
            op = get_closest_monster(self.owner, game_instance.player)
            if self.owner.distance_to(op) >= 2:
                self.owner.move_towards(op.x, op.y)
            else:
                # Adjacent: both combatants attack each other.
                game.message('%s attacks %s in his confusion' % (self.owner.name, op.name))
                if self.owner.fighter:
                    self.owner.fighter.attack(op)
                if op.fighter:
                    op.fighter.attack(self.owner)
            self.num_turns -= 1
        else:
            # Confusion expired: restore the original AI.
            self.owner.ai = self.old_ai
            game.message('%s is no longer confused' % self.owner.name)
def monster_death(monster):
    """Turn *monster* into an inert, non-blocking corpse object."""
    # A corpse no longer fights, thinks, or blocks movement.
    for attr, value in (('char', '\x09'),
                        ('color', libtcod.dark_red),
                        ('blocks', False),
                        ('fighter', None),
                        ('ai', None)):
        setattr(monster, attr, value)
    monster.name = 'remains of %s' % monster.name
    monster.send_to_back()
import functools
# Player-centric convenience queries, bound once at import time.  Each one
# pre-applies a filter that selects objects with a fighter component
# (monster_at additionally excludes the player itself).
monster_at = functools.partial(game_instance.player.object_at,
    filter=lambda obj: obj.fighter and obj is not game_instance.player
)
get_closest_monster = functools.partial(game_instance.player.get_closest_object,
    filter=lambda obj: obj.fighter
)
get_visible_monsters = functools.partial(game_instance.player.get_visible_objects,
    filter=lambda obj: obj.fighter
)
#####
import yaml
import os.path
import glob
import monsters
class MonsterLoader(object):
    """Loads monster type definitions from YAML files in a directory and
    registers them with the Game."""

    def __init__(self, dir):
        # Directory scanned for '*.yml' monster definition files.
        self.dir = dir

    def load_monsters(self):
        """Parse every YAML document in every *.yml file under self.dir."""
        for fn in glob.glob(os.path.join(self.dir,'*.yml')):
            print 'fn', fn
            for doc in yaml.safe_load_all(file(fn)):
                self.load_monster(doc)

    def load_monster(self, doc):
        """Register one monster type described by the YAML mapping *doc*.

        Expected keys include: char, hp, defense, power, spawn_chance,
        race_name, namegen_class; optional: color, name_fmt, ai_class.
        """
        # Color may be omitted (default red), a libtcod color name (string),
        # or an RGB sequence.
        color = doc.get('color', None)
        if color is None:
            color = libtcod.red
        elif hasattr(color, 'upper'):
            color = getattr(libtcod, color)
        else:
            color = libtcod.Color(*color)

        # ai_class may be a dotted class path, or a mapping whose
        # 'class_name' key names the class and whose remaining keys are
        # passed to the AI instance via Monster.load_data.
        ai_class = doc.get('ai_class', BasicMonster)
        cls_data = {}
        if ai_class is not BasicMonster:
            # NOTE(review): this reassignment of cls_data is redundant
            # (already initialized above).
            cls_data = {}
            if hasattr(ai_class, 'items'):
                nm = ai_class.pop('class_name', 'monsters.BasicMonster')
                cls_data.update(ai_class)
                ai_class = nm
            # Resolve the dotted path "module.Class" into a class object.
            module, clas = ai_class.rsplit('.',1)
            module = __import__(module)
            ai_class = getattr(module, clas)

        # AI classes may supply their own death handler (e.g. Thief.death).
        death_func = getattr(ai_class, 'death', monster_death)

        print 'loading', doc
        # The outer lambda binds this document's values so the inner factory
        # (called later with map/level/console/position) builds the right
        # monster; the factory is registered with the doc's spawn chance.
        Game.register_monster_type(
            (lambda doc:
                lambda map,level,con,x,y: objects.Object( map, con, x,y,
                    doc['char'],
                    doc.get('name_fmt', '%s the %s') % (
                        libtcod.namegen_generate(doc['namegen_class']).capitalize(),
                        doc['race_name'].capitalize()
                    ),
                    color,
                    True,
                    fighter=objects.Fighter(
                        hp=doc['hp'],
                        defense=doc['defense'],
                        power=doc['power'],
                        death_function=death_func
                    ),
                    ai=ai_class().load_data(cls_data),
                    level=level
                )
            )(doc), doc['spawn_chance'])
# Hard-coded monster types registered alongside the YAML-defined ones.
# The second argument is the relative spawn weight.
Game.register_monster_type(
    lambda map,level, con,x,y: objects.Object(map, con,
        x,y, '\x02', '%s the Orc' % libtcod.namegen_generate('Fantasy male'),
        libtcod.blue, True,
        fighter=objects.Fighter(hp=10, defense=2, power=3, death_function=monster_death),
        ai=AdvancedMonster(),
        level=level
    ), 8)
Game.register_monster_type(
    lambda map,level, con,x,y: objects.Object(map, con,
        x,y, '\x01', '%s the Troll' % libtcod.namegen_generate('Norse male'),
        libtcod.orange, True,
        fighter=objects.Fighter(hp=16, defense=1, power=4, death_function=monster_death),
        ai=AdvancedMonster(),
        level=level
    ), 2)
Game.register_monster_type(
    lambda map,level, con,x,y: objects.Object(map, con,
        x,y, '\x01', '%s the Olog-Hai' % libtcod.namegen_generate('Norse male'),
        libtcod.amber, True,
        fighter=objects.Fighter(hp=16, defense=1, power=7, death_function=monster_death),
        ai=BasicMonster(),
        level=level
    ), 1)
# NOTE(review): a None factory presumably weights the chance that no
# monster is spawned at all -- confirm against Game.register_monster_type.
Game.register_monster_type(None, 7)
| fiddlerwoaroof/yinjar | monsters.py | Python | bsd-3-clause | 7,779 | [
"Amber"
] | 3a0c3ab5226eb569b40985f048a5ceac88937520e5f4d2a794d9ce387d7ec756 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
spin-free X2C correction for extended systems (experimental feature)
'''
from functools import reduce
import copy
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.x2c import x2c
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import tools
from pyscf.pbc.df import aft
from pyscf.pbc.df import aft_jk
from pyscf.pbc.df import ft_ao
from pyscf.pbc.scf import ghf
from pyscf import __config__
def sfx2c1e(mf):
    '''Spin-free X2C.
    For the given SCF object, update the hcore constructor.

    Args:
        mf : an SCF object

    Returns:
        An SCF object

    Examples:

    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz', verbose=0)
    >>> mf = scf.RHF(mol).sfx2c1e()
    >>> mf.scf()

    >>> mol.symmetry = 1
    >>> mol.build(0, 0)
    >>> mf = scf.UHF(mol).sfx2c1e()
    >>> mf.scf()
    '''
    # Already an X2C-decorated SCF object: re-wrap only if the x2c helper
    # is missing, otherwise return it unchanged.
    if isinstance(mf, x2c._X2C_SCF):
        if mf.with_x2c is None:
            return mf.__class__(mf)
        else:
            return mf

    mf_class = mf.__class__
    if mf_class.__doc__ is None:
        doc = ''
    else:
        doc = mf_class.__doc__

    class SFX2C1E_SCF(mf_class, x2c._X2C_SCF):
        __doc__ = doc + '''
        Attributes for spin-free X2C:
            with_x2c : X2C object
        '''
        def __init__(self, mf):
            # Copy all state from the wrapped SCF object, then attach the
            # PBC spin-free X2C helper.
            self.__dict__.update(mf.__dict__)
            self.with_x2c = SpinFreeX2C(mf.mol)
            self._keys = self._keys.union(['with_x2c'])

        def get_hcore(self, cell=None, kpts=None, kpt=None):
            if cell is None: cell = self.cell
            # Resolve the k-point argument: explicit kpts wins, then the
            # mean-field object's own kpts, then the single-kpt spelling.
            if kpts is None:
                if getattr(self, 'kpts', None) is not None:
                    kpts = self.kpts
                else:
                    if kpt is None:
                        kpts = self.kpt
                    else:
                        kpts = kpt
            if self.with_x2c:
                hcore = self.with_x2c.get_hcore(cell, kpts)
                # GHF works in a doubled spin-orbital basis: place the
                # spatial hcore twice on the diagonal for each k-point.
                if isinstance(self, ghf.GHF):
                    if kpts.ndim == 1:
                        hcore = scipy.linalg.block_diag(hcore, hcore)
                    else:
                        hcore = [scipy.linalg.block_diag(h, h) for h in hcore]
                return hcore
            else:
                # with_x2c disabled: fall back to the undecorated hcore.
                return mf_class.get_hcore(self, cell, kpts)

    return SFX2C1E_SCF(mf)

sfx2c = sfx2c1e  # short alias
class X2C(x2c.X2C):
    '''Base PBC X2C helper; defaults may be overridden through the global
    pyscf __config__ object.'''
    exp_drop = getattr(__config__, 'pbc_x2c_X2C_exp_drop', 0.2)
    approx = getattr(__config__, 'pbc_x2c_X2C_approx', 'atom1e')
    xuncontract = getattr(__config__, 'pbc_x2c_X2C_xuncontract', True)
    basis = getattr(__config__, 'pbc_x2c_X2C_basis', None)

    def __init__(self, cell, kpts=None):
        self.cell = cell
        # Reuse the molecular X2C initializer, treating the cell as "mole".
        x2c.X2C.__init__(self, cell)
class SpinFreeX2C(X2C):
    '''Spin-free (scalar-relativistic) X2C with a one-electron X matrix.

    Only the one-center ('atom1e') approximation is implemented: the
    decoupling matrix X and the local parts of V and W are built atom by
    atom from molecular integrals, while T, S and the nuclear attraction V
    are evaluated with full periodic lattice sums.
    '''
    def get_hcore(self, cell=None, kpts=None):
        '''Return the spin-free X2C core Hamiltonian.

        Returns one matrix per k-point; a single matrix when kpts is None
        or a single (3,) k-point was given.
        '''
        if cell is None: cell = self.cell
        if kpts is None:
            kpts_lst = numpy.zeros((1,3))
        else:
            kpts_lst = numpy.reshape(kpts, (-1,3))
        # xcell is the (possibly uncontracted) cell used to build X.
        xcell, contr_coeff = self.get_xmol(cell)
        with_df = aft.AFTDF(xcell)
        c = lib.param.LIGHT_SPEED
        assert('1E' in self.approx.upper())
        if 'ATOM' in self.approx.upper():
            # One-center approximation: per-atom molecular integrals give
            # the block-diagonal X matrix and the local V/W blocks.
            atom_slices = xcell.offset_nr_by_atom()
            nao = xcell.nao_nr()
            x = numpy.zeros((nao,nao))
            vloc = numpy.zeros((nao,nao))
            wloc = numpy.zeros((nao,nao))
            for ia in range(xcell.natm):
                ish0, ish1, p0, p1 = atom_slices[ia]
                shls_slice = (ish0, ish1, ish0, ish1)
                t1 = xcell.intor('int1e_kin', shls_slice=shls_slice)
                s1 = xcell.intor('int1e_ovlp', shls_slice=shls_slice)
                with xcell.with_rinv_at_nucleus(ia):
                    z = -xcell.atom_charge(ia)
                    v1 = z * xcell.intor('int1e_rinv', shls_slice=shls_slice)
                    w1 = z * xcell.intor('int1e_prinvp', shls_slice=shls_slice)
                vloc[p0:p1,p0:p1] = v1
                wloc[p0:p1,p0:p1] = w1
                x[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
        else:
            raise NotImplementedError
        # Lattice-summed kinetic, overlap and nuclear attraction matrices.
        t = xcell.pbc_intor('int1e_kin', 1, lib.HERMITIAN, kpts_lst)
        s = xcell.pbc_intor('int1e_ovlp', 1, lib.HERMITIAN, kpts_lst)
        v = with_df.get_nuc(kpts_lst)
        #w = get_pnucp(with_df, kpts_lst)
        if self.basis is not None:
            s22 = s
            s21 = pbcgto.intor_cross('int1e_ovlp', xcell, cell, kpts=kpts_lst)
        h1_kpts = []
        for k in range(len(kpts_lst)):
            # The treatment of pnucp local part has huge effects to hcore
            #h1 = x2c._get_hcore_fw(t[k], vloc, wloc, s[k], x, c) - vloc + v[k]
            #h1 = x2c._get_hcore_fw(t[k], v[k], w[k], s[k], x, c)
            h1 = x2c._get_hcore_fw(t[k], v[k], wloc, s[k], x, c)
            if self.basis is not None:
                # Project back onto the original cell basis.
                # Bug fix: the projection matrix used to be bound to the
                # name 'c', clobbering LIGHT_SPEED for subsequent k-points.
                c_proj = lib.cho_solve(s22[k], s21[k])
                h1 = reduce(numpy.dot, (c_proj.T, h1, c_proj))
            if self.xuncontract and contr_coeff is not None:
                # Contract back from the uncontracted X2C basis.
                h1 = reduce(numpy.dot, (contr_coeff.T, h1, contr_coeff))
            h1_kpts.append(h1)
        if kpts is None or numpy.shape(kpts) == (3,):
            h1_kpts = h1_kpts[0]
        return lib.asarray(h1_kpts)

    def get_xmat(self, cell=None, kpts=None):
        '''Return the block-diagonal one-center X (decoupling) matrix.'''
        if cell is None: cell = self.cell
        xcell, contr_coeff = self.get_xmol(cell)
        c = lib.param.LIGHT_SPEED
        assert('1E' in self.approx.upper())
        if 'ATOM' in self.approx.upper():
            atom_slices = xcell.offset_nr_by_atom()
            nao = xcell.nao_nr()
            x = numpy.zeros((nao,nao))
            for ia in range(xcell.natm):
                ish0, ish1, p0, p1 = atom_slices[ia]
                shls_slice = (ish0, ish1, ish0, ish1)
                t1 = xcell.intor('int1e_kin', shls_slice=shls_slice)
                s1 = xcell.intor('int1e_ovlp', shls_slice=shls_slice)
                with xcell.with_rinv_at_nucleus(ia):
                    z = -xcell.atom_charge(ia)
                    v1 = z * xcell.intor('int1e_rinv', shls_slice=shls_slice)
                    w1 = z * xcell.intor('int1e_prinvp', shls_slice=shls_slice)
                x[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
        else:
            raise NotImplementedError
        return x
# Use Ewald-like technique to compute spVsp.
# spVsp may not be divergent because the numerator spsp and the denominator
# in Coulomb kernel 4pi/G^2 are likely cancelled. Even a real space lattice
# sum can converge to a finite value, it's difficult to accurately converge
# this value, i.e., large number of images in lattice summation is required.
def get_pnucp(mydf, kpts=None):
    '''Evaluate the p.Vnuc.p matrix (the W term of the X2C working
    equations) with plane-wave density fitting.

    Args:
        mydf : an AFTDF object; mydf.cell supplies the unit cell
        kpts : (nkpts,3) array, a single (3,) k-point, or None (Gamma)

    Returns:
        One matrix per k-point; a single matrix when kpts is None or a
        single (3,) k-point was given.
    '''
    cell = mydf.cell
    if kpts is None:
        kpts_lst = numpy.zeros((1,3))
    else:
        kpts_lst = numpy.reshape(kpts, (-1,3))

    log = logger.Logger(mydf.stdout, mydf.verbose)
    t1 = (logger.process_clock(), logger.perf_counter())

    nkpts = len(kpts_lst)
    nao = cell.nao_nr()
    nao_pair = nao * (nao+1) // 2   # packed (aosym='s2') pair count

    Gv, Gvbase, kws = cell.get_Gv_weights(mydf.mesh)
    charge = -cell.atom_charges()   # attraction: negative nuclear charges
    kpt_allow = numpy.zeros(3)
    coulG = tools.get_coulG(cell, kpt_allow, mesh=mydf.mesh, Gv=Gv)
    coulG *= kws
    if mydf.eta == 0:
        # Pure plane-wave treatment: nuclei enter only through the
        # structure factor. (Bug fix: removed a redundant second
        # initialization of wj that appeared in this branch.)
        wj = numpy.zeros((nkpts,nao_pair), dtype=numpy.complex128)
        SI = cell.get_SI(Gv)
        vG = numpy.einsum('i,ix->x', charge, SI) * coulG
    else:
        # Short-range part handled analytically using compensating Gaussian
        # charges of exponent mydf.eta placed on the nuclei.
        nuccell = copy.copy(cell)
        half_sph_norm = .5/numpy.sqrt(numpy.pi)
        norm = half_sph_norm/mole.gaussian_int(2, mydf.eta)
        chg_env = [mydf.eta, norm]
        ptr_eta = cell._env.size
        ptr_norm = ptr_eta + 1
        chg_bas = [[ia, 0, 1, 1, 0, ptr_eta, ptr_norm, 0] for ia in range(cell.natm)]
        nuccell._atm = cell._atm
        nuccell._bas = numpy.asarray(chg_bas, dtype=numpy.int32)
        nuccell._env = numpy.hstack((cell._env, chg_env))

        wj = lib.asarray(mydf._int_nuc_vloc(nuccell, kpts_lst, 'int3c2e_pvp1'))
        t1 = log.timer_debug1('pnucp pass1: analytic int', *t1)

        aoaux = ft_ao.ft_ao(nuccell, Gv)
        vG = numpy.einsum('i,xi->x', charge, aoaux) * coulG

        if cell.dimension == 3:
            # Remove the G=0 (uniform background) contribution.
            nucbar = sum([z/nuccell.bas_exp(i)[0] for i,z in enumerate(charge)])
            nucbar *= numpy.pi/cell.vol
            # NOTE: 'ovlp' deliberately holds the kinetic matrix T here;
            # nucbar couples to spsp = 2*T (see the *2 below).
            ovlp = cell.pbc_intor('int1e_kin', 1, lib.HERMITIAN, kpts_lst)
            for k in range(nkpts):
                s = lib.pack_tril(ovlp[k])
                # *2 due to the factor 1/2 in T
                wj[k] -= nucbar*2 * s

    # Long-range plane-wave part: contract vG with the Fourier transform of
    # the p.p AO-pair density, in blocks bounded by available memory.
    max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
    for aoaoks, p0, p1 in mydf.ft_loop(mydf.mesh, kpt_allow, kpts_lst,
                                       max_memory=max_memory, aosym='s2',
                                       intor='GTO_ft_pdotp'):
        for k, aoao in enumerate(aoaoks):
            if aft_jk.gamma_point(kpts_lst[k]):
                # Gamma point: the result is real; contract parts separately.
                wj[k] += numpy.einsum('k,kx->x', vG[p0:p1].real, aoao.real)
                wj[k] += numpy.einsum('k,kx->x', vG[p0:p1].imag, aoao.imag)
            else:
                wj[k] += numpy.einsum('k,kx->x', vG[p0:p1].conj(), aoao)
    t1 = log.timer_debug1('contracting pnucp', *t1)

    # Unpack the triangular-packed matrices; Gamma-point results are real.
    wj_kpts = []
    for k, kpt in enumerate(kpts_lst):
        if aft_jk.gamma_point(kpt):
            wj_kpts.append(lib.unpack_tril(wj[k].real.copy()))
        else:
            wj_kpts.append(lib.unpack_tril(wj[k]))

    if kpts is None or numpy.shape(kpts) == (3,):
        wj_kpts = wj_kpts[0]
    return numpy.asarray(wj_kpts)
if __name__ == '__main__':
    from pyscf.pbc import scf

    # Smoke test on a minimal H2 cell. LIGHT_SPEED is lowered to 2 so the
    # scalar-relativistic correction is clearly visible in the energies.
    cell = pbcgto.Cell()
    cell.build(unit = 'B',
               a = numpy.eye(3)*4,
               mesh = [11]*3,
               atom = 'H 0 0 0; H 0 0 1.8',
               verbose = 4,
               basis='sto3g')
    lib.param.LIGHT_SPEED = 2

    # Gamma-point comparison: non-relativistic vs SFX2C1E.
    mf = scf.RHF(cell)
    mf.with_df = aft.AFTDF(cell)
    enr = mf.kernel()
    print('E(NR) = %.12g' % enr)
    mf = sfx2c1e(mf)
    esfx2c = mf.kernel()
    print('E(SFX2C1E) = %.12g' % esfx2c)

    # Same comparison on a 2x2x1 k-point mesh.
    mf = scf.KRHF(cell)
    mf.with_df = aft.AFTDF(cell)
    mf.kpts = cell.make_kpts([2,2,1])
    enr = mf.kernel()
    print('E(k-NR) = %.12g' % enr)
    mf = sfx2c1e(mf)
    esfx2c = mf.kernel()
    print('E(k-SFX2C1E) = %.12g' % esfx2c)

    # Disabled cross-check of get_pnucp with and without attenuation (eta).
#    cell = pbcgto.M(unit = 'B',
#                    a = numpy.eye(3)*4,
#                    atom = 'H 0 0 0; H 0 0 1.8',
#                    mesh = None,
#                    dimension = 2,
#                    basis='sto3g')
#    with_df = aft.AFTDF(cell)
#    w0 = get_pnucp(with_df, cell.make_kpts([2,2,1]))
#    with_df = aft.AFTDF(cell)
#    with_df.eta = 0
#    w1 = get_pnucp(with_df, cell.make_kpts([2,2,1]))
#    print(abs(w0-w1).max())
| sunqm/pyscf | pyscf/pbc/x2c/sfx2c1e.py | Python | apache-2.0 | 11,486 | [
"PySCF"
] | 31043fff7ad310df88742821b7b8bb6bc5635b99288afe3f54c878300ff68c86 |
# -*- coding: utf-8 -*-
# @author: Thomas Anderson
###################
# run_expt.py #
###################
# Thomas Anderson #
###################
# 29/01/2014 #
###################
# Last updated: #
# 11/03/2014 #
###################
##########################
# Python QR Code Project #
# ---------------------- #
# Main Program #
##########################
"""
===========
Version 1.0
===========
Released:
11/03/2014
===========
=========================================================================================================
Requires: random, string [Included in python installation], mod_imgtools and mod_encode_decode [Supplied]
=========================================================================================================
A program used to encode, degrade and then test qr codes to see if each error setting can read the images
produced using the specified blur radius and image size.
In this directory are the two modules required for the program to run, mod_imgtools and mod_encode_decode.
Also in the directory are two folders named 'images' and 'data' used to keep the structure of the files
in an ordered manner.
- images/calibration_purewhite.png = A 100 x 100 px pure white image
- images/calibration_25percent.png = The file above with a 50 x 50 px black square in the top left
covering 25% of the image.
- mod_encode_decode.py = Thomas Anderson's encoding and decoding module.
- mod_imgtools.py = Thomas Anderson's image processing module.
For usage of each function, please refer to the DOCSTRING. There are a few user configurable variables
which can be altered and set to different values in order to produce different output files.
Changing the 'blur' variable will alter the amount that each set of pixels is gaussian blurred by to
produce the image to be tested.
Changing the 'maxsize' variable will alter the maximum image size that will be created for testing.
Each image is degraded and tested to see if it can be decoded and then the program will produce a text
file in the data folder indicating the results of the test.
This program may be ran standalone, but the required modules must be included in a file.
Remember that because this file is released under the MIT License, all usage must include
credit to the original owner (myself); you must therefore retain the first line if you import
this file, or the use will be an infringement of copyright.
Source files can be found on https://github.com/Driminary/python-qr-project
"""
# Import modules
import mod_encode_decode as qr_ed # My encoding-decoding module
import mod_imgtools as qr_imgtools # My image processing module
import random, string

############################
# User Configuration Start #
############################

# Set the type of error redundancy [Must be between 1 and 4]
errorsetting = 4

# Set max image size [To stop massive resource usage, keep this below 15]
maxsize = 10

##########################
# User Configuration End #
##########################

# Check errorsetting is a valid redundancy level (1..4)
assert int(errorsetting) >= 1 and int(errorsetting) <=4, "The specified error redundancy was not valid"

# Check maxsize is a positive integer greater than 1
assert int(maxsize) > 1 , "The specified maximum size was not a valid number"

# Set message to a randomly generated string of letters and digits, 32 characters long
message = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])

# Open file to write output to (named after the error setting being tested)
f = open("./data/data_blur_%s_output.txt" % errorsetting, "w")

# Print the message - write to file
print >>f , "\n================\nThe message encoded this run is: ", message, "\n================\nError Setting = %s\n================\nMax Size = %s\n================" % (errorsetting,maxsize)

# Initialise blur-radius counter (each run sweeps radii 5..15 inclusive)
blur = 5

# Do a set of tests for all blur and sizes
while (blur <= 15):
    # Print blur size (run number) - write to file
    print >>f , "\n================\nBlur Size = %s\n================\n" % (blur)
    # Initialise image-size counter
    i = 1
    # Repeat for the sizes 1 -> max size
    while (i <= maxsize):
        # Run encoding function from encode_decode for each error redundancy
        qr_ed.encode(message,"code",i,errorsetting)
        # Run addblur function from imgtools
        qr_imgtools.addblur(blur,"./images/code.png")
        # Run compare function from imgtools - write to file
        print >>f , "The reading of image size ", i, " (where the images were ",qr_imgtools.compare("./images/code.png","./images/code_blurred.png"), "% different) was a ", qr_ed.decode("./images/code_blurred.png")
        # Increase counter
        i += 1
    # Increase counter
    blur += 1

# Print a success message, if the program failed at any point, the error handling in the code should take care of it.
print "Complete, wrote output to ", f.name

# Close the file
f.close()
"Gaussian"
] | a48053c02cc7bda1a158c5f91d3ee01254bd78336ec9408effef9d0c3933eca2 |
#!/usr/bin/python
import os, sys, inspect, subprocess, tempfile
# Include the parent directory in the search path.
cmd_folder = os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from common import dbConn, DEFAULT_TAG, BACKUP_TOP, config, problems
from datetime import datetime
import itertools
import time
import calendar
import glob
# Edit distance, for approximate problem name matching.
def editDist( a, b ):
    """Levenshtein distance between strings a and b (unit cost for each
    insertion, deletion and substitution)."""
    rows = len( a ) + 1
    cols = len( b ) + 1
    table = [ [ 0 ] * cols for _ in range( rows ) ]
    # Base cases: transforming to/from the empty prefix.
    for r in range( rows ):
        table[ r ][ 0 ] = r
    for c in range( cols ):
        table[ 0 ][ c ] = c
    # Fill the table row by row.
    for r in range( 1, rows ):
        for c in range( 1, cols ):
            substitute = table[ r - 1 ][ c - 1 ]
            if a[ r - 1 ] != b[ c - 1 ]:
                substitute += 1
            delete = table[ r - 1 ][ c ] + 1
            insert = table[ r ][ c - 1 ] + 1
            table[ r ][ c ] = min( substitute, delete, insert )
    return table[ len( a ) ][ len( b ) ]
def repeatedString( str, pat ):
    """Return 1 if str consists of one or more cyclic repetitions of pat
    (a trailing partial repetition is allowed), otherwise 0."""
    n = len( pat )
    if len( str ) < n:
        return 0
    # Every character must agree with pat read cyclically.
    matches = all( ch == pat[ j % n ] for j, ch in enumerate( str ) )
    return 1 if matches else 0
class File:
    """Record describing one backed-up source file."""

    def __init__( self, path, time ):
        # Path below the team directory and last-modification time.
        self.path = path
        self.time = time
        # Size in bytes, line count, and lines added+removed (from git).
        # All three start at zero and are filled in during analysis.
        self.size = self.lineCount = self.linesChanged = 0

    def __repr__(self):
        return str( self.path )
# Representation for the value part of the file_to_problem mapping,
# i.e., one line from the file_to_problem table. This used to be a
# 4-tuple. When the lang_id was added, this struct was created to
# help with the transition to 5 fields and to make the structure
# easier to interpret.
class MappingRec:
    """One row of the file_to_problem table, plus a pending new mapping."""

    def __init__( self, db_id, problem_id, lang_id, override, new_problem_id ):
        # Mirror the five database fields verbatim onto attributes.
        names = ( 'db_id', 'problem_id', 'lang_id', 'override', 'new_problem_id' )
        values = ( db_id, problem_id, lang_id, override, new_problem_id )
        for name, value in zip( names, values ):
            setattr( self, name, value )
# This functionality needs to be split into an executable analyzer and
# a reusable classifier. That will help with testing.
class Analyzer:
    """Scans team backup directories, guesses which problem each source
    file belongs to, and records edit activity in the contest database."""

    def __init__( self, basePath ):
        # path to the top of the backup directory.
        self.basePath = basePath
        # index of the last team in the competition.
        self.lastTeam = config[ "teambackup" ][ "lastTeam" ]
        # interval for updating team backups, used in this script to freshen stale modification
        # times, if it looks like there's a reason.
        self.backupInterval = config[ "teambackup" ][ "interval" ]
        # Strings to strip from the filename before we try to guess
        self.commonStrips = [ 'problem', 'prob', '_', '-' ]
        # Valid source file extensions, and what each one says
        # about the source language.
        self.extensionMap = {
            'cc': "C++",
            'cpp': "C++",
            'c': "C",
            'java': "Java",
            'py': "Python" # FIXME: do we want to discern between Python 2/3?
        }
        # map from problem ID to a list of keywords to look for.
        self.probKeywords = {}
        # List of problems. We use this mostly as the official
        # list of problem letters, from the configuration.
        self.problemList = problems
        # Contest start time in Unix seconds from the database.
        cursor = dbConn.cursor()
        cursor.execute( "SELECT start_time FROM contests ORDER BY start_time DESC LIMIT 1" )
        row = cursor.fetchone()
        if ( row == None ):
            print("Error: no contest found in the database.")
            exit(1)
        self.contestStart = row[0]
        # For each team, a list of team-specific strips from filenames.
        self.teamStrips = {}
        # we use the next two fields to hold copies of database
        # information (the file_to_problem and file_modtime tables)
        # while the script is running, and to update the tables once
        # the script has run.
        # For every team and path, this is a triple, datatabase_id,
        # latest modification time and File object (if the file has
        # has changed). We only add a new entry to edit_activity if
        # it's sufficiently newer than what we have there or if we just
        # committed and git reports that a file has changed.
        self.lastEditTimes = {}
        # map from team_id and path to a MappingRec instance
        # containing db_id, problem_id, lang_id, override flag and new
        # problem ID (if we just generated a new mapping). This lets
        # us know what to ignore in the mapping and what to update
        # when we re-write the database. Multiple files may map to
        # the same problem, if the team is working on multiple
        # versions or has some supporting files.
        self.fileMappings = {}
    def loadConfiguration( self ):
        """Load classification state from the database: problem keywords,
        known file modification times, existing file->problem mappings and
        per-team filename strips."""
        # Read the list of problem names
        self.probKeywords = {}
        cursor = dbConn.cursor()
        cursor.execute( "SELECT problem_id, keyword FROM problem_keywords" )
        row = cursor.fetchone()
        while ( row != None ):
            # Accumulate all (lower-cased) keywords per problem.
            if ( row[ 0 ] in self.probKeywords ):
                self.probKeywords[ row[ 0 ] ].append( row[ 1 ].lower() )
            else:
                self.probKeywords[ row[ 0 ] ] = [ row[ 1 ].lower() ]
            row = cursor.fetchone()

        # get latest known edit times for every team/path
        cursor.execute( "SELECT id, team_id, path, modify_timestamp FROM file_modtime" )
        row = cursor.fetchone()
        while ( row != None ):
            # Value is [ db_id, last known mod time, File object or None ].
            self.lastEditTimes[ ( row[ 1 ], row[ 2 ] ) ] = [ row[ 0 ], row[ 3 ], None ]
            row = cursor.fetchone()

        # get existing mapping records for all mapped files.
        cursor.execute( "SELECT id, team_id, path, problem_id, lang_id, override FROM file_to_problem" )
        row = cursor.fetchone()
        while ( row != None ):
            self.fileMappings[ ( int( row[ 1 ] ), row[ 2 ] ) ] = MappingRec( row[ 0 ], row[ 3 ], row[ 4 ], row[ 5 ], None )
            # Old mapping
            # 0 -> id, 1 -> problem_id, 2 -> override, 3 -> new_problem_id
            row = cursor.fetchone()

        # load any team-specific strips.
        cursor.execute( "SELECT team_id, str FROM team_strips" )
        row = cursor.fetchone()
        while ( row != None ):
            if ( row[ 0 ] in self.teamStrips ):
                self.teamStrips[ row[ 0 ] ].append( row[ 1 ].lower() )
            else:
                self.teamStrips[ row[ 0 ] ] = [ row[ 1 ].lower() ]
            row = cursor.fetchone()
        cursor.close()
    def parseGitDiffs( self, bdir ):
        """Get a full report of differences between the current
        revision and the previous one. Return as a map from path name
        (teamxxx/path/to/file) to a tuple giving lines added and
        lines removed (the order produced by git diff --numstat)."""
        origin = os.getcwd()
        os.chdir( bdir )
        statFile = tempfile.TemporaryFile()
        # Get the report.
        subprocess.call( [ "git", "diff", "--numstat", "HEAD^", "HEAD" ], stdout=statFile )
        # Each line is lines added, lines removed, path
        result = {}
        statFile.seek( 0 )
        for line in statFile:
            fields = line.rstrip().split( "\t" )
            # Git still tracks binary files, but it doesn't report lines
            # changed. Looks like it just reports a dash instead, but
            # we ignore anything that's not an int.
            try:
                result[ fields[ 2 ] ] = ( int( fields[ 0 ] ), int( fields[ 1 ] ) )
            except ValueError:
                pass
        os.chdir( origin )
        return result;
def countLines(self, p):
"""Given path p, count the number of lines in the file it points to."""
f = open( p )
lineCount = sum( 1 for line in f)
f.close()
return lineCount
    def checkAutosaves( self, f ):
        """Return the newest modification time among editor auto-save files
        next to f (emacs '#name#' and vim '.name.swp'), or None if neither
        exists."""
        # Split into directory and file.
        ( dirName, fileName ) = os.path.split( f )
        autoTime = None
        # is it an emacs autosave file
        autoFile = "%s/#%s#" % ( dirName, fileName )
        if os.path.exists( autoFile ):
            autoTime = os.path.getmtime( autoFile )
        # is it a vim autosave file
        autoFile = "%s/.%s.swp" % ( dirName, fileName )
        if os.path.exists( autoFile ):
            newTime = os.path.getmtime( autoFile )
            # Keep whichever auto-save is fresher.
            if ( autoTime == None or newTime > autoTime ):
                autoTime = newTime
        return autoTime
def stripDecoration( self, strips, str ):
last = None
while str != last:
last = str
for s in strips:
idx = str.find( s )
while idx != -1:
str = str[:idx] + str[(idx + len( s )):]
idx = str.find( s )
return str
    def guessProblem( self, team, path ):
        """Should return the most likely problem id for this file,
        or None.

        Heuristics run from most to least confident: exact keyword match on
        the stripped filename, repeated problem letters, matches against
        directory components, substring matches, letters joined by a
        non-alpha character, and finally edit-distance approximations.
        """
        # Split into directory and file.
        ( dirName, fileName ) = os.path.split( path )
        dirName = dirName.lower()
        fileName = fileName.lower()
        # Build general and team-specific strips. I'm sure
        # there's a better way to do this.
        strips = []
        for x in self.commonStrips:
            strips.append( x )
        if ( team in self.teamStrips ):
            for x in self.teamStrips[ team ]:
                strips.append( x )
        baseName, extension = os.path.splitext( fileName )
        extension = extension.lstrip( '.' )
        # Only classify source files in known languages.
        if extension not in self.extensionMap:
            return None
        # Strip off extra words
        shortName = self.stripDecoration( strips, baseName )

        # Ordering here is a little bit important. We look first
        # at the matches that are more confident. Then, we look
        # at the ones that are less likely

        # First, consider just the filename against all problem keywords.
        for problem_id, keywords in self.probKeywords.iteritems():
            for keyword in keywords:
                # tsp.cpp -> a
                if shortName == keyword:
                    return problem_id

        # Here, we try to match against arbitrarily many occurrences of the problem
        # letter. Some teams are using names like aaa.c for their third attempt.
        for problem_id in self.problemList:
            # a.cpp -> a or aaa.cpp -> a
            if repeatedString( shortName, problem_id.lower() ):
                return problem_id

        # Then, start looking at the path.
        if len( dirName ) > 0:
            dirList = dirName.split( '/' )
            for dir in dirList:
                shortDirName = self.stripDecoration( strips, dir )
                for problem_id, keywords in self.probKeywords.iteritems():
                    for keyword in keywords:
                        # tsp/sol.java -> a
                        if shortDirName == keyword:
                            return problem_id
                for problem_id in self.problemList:
                    # a/code.cpp -> a or aaa/code.cpp -> a
                    if repeatedString( shortDirName, problem_id.lower() ):
                        return problem_id

        # Then, take matches that occur anywhere in the problem name
        for problem_id, keywords in self.probKeywords.iteritems():
            for keyword in keywords:
                if ( keyword in baseName ):
                    return problem_id

        # Then, the problem letter attached to some other word with a
        # non-alpha character.
        for problem_id in self.problemList:
            letter = problem_id.lower()
            # b_2.c -> b
            if ( len( baseName ) > len( letter ) and
                 baseName.startswith( letter ) and
                 not baseName[ len( letter ) ].isalpha() ):
                return problem_id
            # losning_b.c -> b
            if ( len( baseName ) > len( letter ) and
                 baseName.endswith( letter ) and
                 not baseName[ -( len( letter ) + 1 )].isalpha() ):
                return problem_id

        # Then, look for path elements containing the name.
        if len( dirName ) > 0:
            dirList = dirName.split( '/' )
            for dir in dirList:
                shortDirName = self.stripDecoration( strips, dir )
                for problem_id, keywords in self.probKeywords.iteritems():
                    for keyword in keywords:
                        # retry_b/sol.java -> b
                        if keyword in shortDirName:
                            return problem_id

        # Then, try an approximate match against a keyword, willing to miss
        # a fraction of the total characters.
        for problem_id, keywords in self.probKeywords.iteritems():
            for keyword in keywords:
                if ( len( keyword ) > 3 and
                     editDist( keyword, shortName ) <= len( keyword ) * 0.25 ):
                    return problem_id

        # Then, look for approximate matches in any directory element.
        if len( dirName ) > 0:
            dirList = dirName.split( '/' )
            for dir in dirList:
                shortDirName = self.stripDecoration( strips, dir )
                for problem_id, keywords in self.probKeywords.iteritems():
                    for keyword in keywords:
                        if ( len( keyword ) > 3 and
                             editDist( keyword, shortDirName ) <= len( keyword ) * 0.25 ):
                            return problem_id
        return None
    def guessPath( self, team, path ):
        """Testing interface for problem guessing: load configuration from
        the database, then classify a single (team, path) pair."""
        self.loadConfiguration()
        return self.guessProblem( team, path )
def checkActivity( self, bdir, tag ):
"""Scan the given backup dir and generate reports of the state of
files believed to correspond to various problems in the problem set."""
# Time when this script started running.
scriptStartTime = int( time.time() )
self.loadConfiguration()
# Get diff reports from git.
gitDiffs = self.parseGitDiffs( bdir )
# We should rethink some of the following loop. Right now, it
# tries to find file changes, including changes to editor auto-save
# files. But, to report changed lines in the file, we depend on git
# (which probably isn't tracking these auto-save files, so we'd have
# nothing to report)
# Visit home directory for each team.
tlist = sorted( glob.glob( bdir + '/team*' ) )
for tdir in tlist:
( dirname, tname ) = os.path.split( tdir )
team = int( tname.lstrip( 'team' ) )
cmd = "find %s/ -type f" % tdir
for f in os.popen( cmd ).readlines():
f = f.rstrip( '\n' )
fname = f[len(tdir) + 1:]
( dummy, extension ) = os.path.splitext( fname )
extension = extension.lstrip( '.' )
if extension in self.extensionMap:
fobj = File( fname, os.path.getmtime( f ) )
# Get lines changed, etc. We need to consult the git diff output.
# The tag is to make sure we only record lines changed on an analysis
# pass that's paired to a git commit. Independent analysis passes
# don't get this, since that could infate the appearance of how
# much editing is being done.
gitPath = f[len(bdir) + 1:]
if gitPath in gitDiffs and tag != DEFAULT_TAG:
fobj.linesChanged = gitDiffs[ gitPath ][ 0 ] + gitDiffs[ gitPath ][ 1 ];
mappingRec = None
lastEditRec = None
# Files with completely implausible modification times can get ignored.
ignoreEdit = False
# see if there's a mapping for this file.
if ( team, fname ) in self.fileMappings:
mappingRec = self.fileMappings[ ( team, fname ) ]
# If there's no forced mapping for this problem, try to guess one.
if ( mappingRec == None or mappingRec.override == 0 ):
prob = self.guessProblem( team, fobj.path )
if prob != None:
if mappingRec == None:
mappingRec = MappingRec( None, None, self.extensionMap[ extension ],
0, None );
self.fileMappings[ ( team, fname ) ] = mappingRec
if mappingRec.problem_id != prob:
mappingRec.new_problem_id = prob;
# see if there's an edit record for this file.
if ( team, fname ) in self.lastEditTimes:
lastEditRec = self.lastEditTimes[ ( team, fname ) ]
# check common editor auto-saves, to see if there
# is a fresher modification time.
autoTime = self.checkAutosaves( f );
if ( autoTime != None and autoTime > fobj.time ):
fobj.time = autoTime
# Try to guard against anomalous file edit times. These are unlikely to happen,
# but they could look strange in the report or even suppress tracking of files
# if they have a modification time in the future.
# No edits should happen before the start of the contest or after right now.
if fobj.time < self.contestStart:
fobj.time = self.contestStart
# If the file really changes, we definitely want to record it, possibly with
# a sane-ified modification time.
if fobj.time < scriptStartTime - 2 * self.backupInterval:
if fobj.linesChanged > 0:
fobj.time = int( scriptStartTime - self.backupInterval / 2 )
# If the file looks like it changed in the future, ignore it unless git agrees it's changing.
if fobj.time > scriptStartTime + self.backupInterval:
print "Future Modification: ", fobj.path, " changed ", fobj.linesChanged, " lines, ", (fobj.time - scriptStartTime), " seconds in the future"
if fobj.linesChanged > 0:
fobj.time = scriptStartTime
else:
ignoreEdit = True
# Is this newer than our last known edit?
# We don't just depend on git for this, since we can
# also watch auto-saves.
if not ignoreEdit and ( lastEditRec == None or lastEditRec[ 1 ] + 10 < fobj.time ):
if lastEditRec == None:
lastEditRec = [ None, None, None ]
self.lastEditTimes[ ( team, fname ) ] = lastEditRec
# Grab file size and number of lines.
fobj.size = os.path.getsize( f )
fobj.lineCount = self.countLines( f )
lastEditRec[ 2 ] = fobj;
# Write out any new mappings
cursor = dbConn.cursor()
for k, v in self.fileMappings.iteritems():
if v.new_problem_id != None:
if v.db_id == None:
update = "INSERT INTO file_to_problem (team_id, path, problem_id, lang_id, override ) VALUES ( '%s', '%s', '%s', '%s', '0' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), v.new_problem_id, v.lang_id )
cursor.execute( update )
else:
update = "UPDATE file_to_problem SET problem_id='%s' WHERE id='%d'" % ( v.new_problem_id, v[ 0 ] )
cursor.execute( update )
print "( %s, %s ) -> %s" % ( k[ 0 ], k[ 1 ], v.new_problem_id )
# Write out fresh edit times to file_modtime and new records to edit_activity
cursor = dbConn.cursor()
for k, v in self.lastEditTimes.iteritems():
if v[ 2 ] != None:
t = time.gmtime( v[ 2 ].time )
if v[ 0 ] == None:
update = "INSERT INTO file_modtime (team_id, path, modify_timestamp ) VALUES ( '%s', '%s', '%d' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), t )
cursor.execute( update )
else:
update = "UPDATE file_modtime SET modify_time='%d' WHERE id='%d'" % ( t, v[ 0 ] )
cursor.execute( update )
# Compute time since start of contest.
cmin = ( v[ 2 ].time - self.contestStart ) / 60
update = "INSERT INTO edit_activity (team_id, path, modify_timestamp, modify_time, file_size_bytes, line_count, lines_changed, git_tag ) VALUES ( '%s', '%s', '%d', '%s', '%d', '%d', '%d', '%s' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), t, cmin, v[ 2 ].size, v[ 2 ].lineCount, v[ 2 ].linesChanged, tag )
cursor.execute( update )
# Create and write the summary of edit activity by problem, edit_latest
# Map from team and problem_id to a triple, database_id,
# timestamp and valid flag. the valid flag lets us delete
# database rows (say, if a file_to_problem mapping changes). An
# entry is valid as long as there is a file that's mapped to
# the given problem, even if the file no longer exists.
modLatest = {}
# get latest known edit times for every team/problem.
cursor.execute( "SELECT id, team_id, problem_id, modify_timestamp FROM edit_latest" )
row = cursor.fetchone()
while ( row != None ):
modLatest[ ( row[ 1 ], row[ 2 ] ) ] = [ row[ 0 ], row[ 3 ], 0 ]
row = cursor.fetchone()
for k, v in self.fileMappings.iteritems():
prob = v.problem_id
if v.new_problem_id != None:
prob = v.new_problem_id
if prob != None and prob != 'none':
if k in self.lastEditTimes:
lastEditRec = self.lastEditTimes[ k ]
t = lastEditRec[ 1 ]
if lastEditRec[ 2 ] != None:
t = lastEditRec[ 2 ].time;
if ( k[ 0 ], prob ) in modLatest:
rec = modLatest[ ( k[ 0 ], prob ) ]
if t > rec[ 1 ]:
rec[ 1 ] = t
rec[ 2 ] = 1;
else:
modLatest[ ( k[ 0 ], prob ) ] = [ None, t, 1 ]
for k, v in modLatest.iteritems():
t = time.gmtime( v[ 1 ] )
if v[ 0 ] == None:
update = "INSERT INTO edit_latest (team_id, problem_id, modify_timestamp ) VALUES ( '%s', '%s', '%d' )" % ( k[ 0 ], k[ 1 ], t )
cursor.execute( update )
elif v[ 2 ]:
update = "UPDATE edit_latest SET modify_timestamp='%d' WHERE id='%d'" % ( t, v[ 0 ] )
cursor.execute( update )
else:
update = "DELETE FROM edit_latest WHERE id='%d'" % ( v[ 0 ] )
cursor.execute( update )
def reportUnclassified( self, bdir ):
    """Report all the source files that are not mapped to any problem
    yet.

    Walk every team home directory under bdir and print, for each file
    whose extension is known (present in self.extensionMap), the problem
    it is (or would be) assigned to.  Forced (override) mappings are
    printed with '<=', guessed or unknown mappings with '<-'.

    bdir -- top-level backup directory containing per-team 'teamNN'
            subdirectories."""
    self.loadConfiguration()
    # Visit home directory for each team.
    tlist = sorted( glob.glob( bdir + '/team*' ) )
    for tdir in tlist:
        ( dirname, tname ) = os.path.split( tdir )
        # Team number is the directory name minus the 'team' prefix.
        # NOTE(review): lstrip strips a character set, not a prefix; safe
        # here only because 't', 'e', 'a', 'm' never appear in the digits.
        team = int( tname.lstrip( 'team' ) )
        # Enumerate every regular file under the team's tree.
        cmd = "find %s/ -type f" % tdir
        for f in os.popen( cmd ).readlines():
            f = f.rstrip( '\n' )
            # Path of the file relative to the team directory.
            fname = f[len(tdir) + 1:]
            ( dummy, extension ) = os.path.splitext( fname )
            extension = extension.lstrip( '.' )
            # Only consider files whose extension maps to a known language.
            if extension in self.extensionMap:
                fobj = File( fname, os.path.getmtime( f ) )
                prob = None;
                # see if there's an override for this file.
                if ( team, fname ) in self.fileMappings:
                    mappingRec = self.fileMappings[ ( team, fname ) ]
                    if mappingRec.override:
                        prob = mappingRec.problem_id
                        print "%s <= %s" % ( prob, f )
                # if it's not a forced mapping, try to guess and report that.
                if prob == None:
                    # No forced problem, try to guess.
                    prob = self.guessProblem( team, fobj.path )
                    # report the file and the problem its assigned to.
                    if prob == None:
                        print "unknown <- %s" % ( f )
                    else:
                        print "%s <- %s" % ( prob, f )
if __name__ == '__main__':
    # Command-line entry point: scan the backup tree and record edit
    # activity.  The optional first argument is the git tag to stamp on
    # the database rows; DEFAULT_TAG is used when none is supplied.
    analyzer = Analyzer( BACKUP_TOP )
    tag = DEFAULT_TAG;
    if len( sys.argv ) > 1:
        tag = sys.argv[ 1 ]
    analyzer.checkActivity( BACKUP_TOP, tag )
| eldering/autoanalyst | code_analyzer/analyzer.py | Python | mit | 26,667 | [
"VisIt"
] | 90e18d26a5658cdc46915932ac301a41141c26565401e9bf003afe020ebfc00d |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
from psi4 import core
from psi4.driver.p4util.exceptions import *
"""
This module provides ``engine`` objects that can be used by the :func:`~psi4.driver.p4util.solvers.davidson_solver` and
:func:`~psi4.driver.p4util.solvers.hamiltonian_solver`
Spin Orbital Expressions for the Relevant product components
------------------------------------------------------------
Aia,jb = (E_a - E_i) + J - K
= (E_a - E_i) + (ia|jb) - (ij|ab)
Bia,jb = J - K^T
= (ai|bj) - (aj|bi)
H2ia,jb = A - B
= [(E_a - E_i) + J - K ] - [ J - K^T ]
= [(E_a - E_i) + (ia|jb) - (ij|ab)] - [ (ai|bj) - (aj|bi)]
= (E_a - E_i) - K + K^T
= (E_a - E_i) - (ij|ab) + (aj|bi)
H1ia,jb = A + B
= [(E_a - E_i) + J - K ] + [ J - K^T ]
= [(E_a - E_i) + (ia|jb) - (ij|ab)] + [ (ai|bj) - (aj|bi)]
= [(E_a - E_i) + J - K - K^T]
= [(E_a - E_i) + (ia|jb) - (ij|ab) - (aj|bi)]
"""
class SingleMatPerVector:
    """Vector algebra for RHF-like systems, where every trial vector is a
    single :py:class:`psi4.core.Matrix`.

    Each operation simply forwards to the corresponding Matrix method; the
    in-place operations (scale, axpy) return their mutated argument so the
    calls compose.
    """

    @staticmethod
    def vector_dot(X, Y):
        """Return the inner product <X|Y>."""
        return X.vector_dot(Y)

    @staticmethod
    def vector_scale(a, X):
        """Scale X by the scalar a, in place, and return X."""
        X.scale(a)
        return X

    @staticmethod
    def vector_axpy(a, X, Y):
        """Accumulate a * X into Y, in place, and return Y."""
        Y.axpy(a, X)
        return Y

    @staticmethod
    def vector_copy(X):
        """Return an independent copy of X."""
        return X.clone()

    @staticmethod
    def vector_transpose(X):
        """Return the transpose of X as a new matrix."""
        return X.transpose()
class PairedMatPerVector:
    """Vector algebra for UHF-like systems, where every trial vector is a
    two-element list of :py:class:`psi4.core.Matrix` objects holding the
    (alpha, beta) components.

    Operations act componentwise; the in-place operations (scale, axpy)
    return their mutated argument so the calls compose.
    """

    @staticmethod
    def vector_dot(X, Y):
        """Return <X|Y>, summed over the alpha and beta components."""
        return X[0].vector_dot(Y[0]) + X[1].vector_dot(Y[1])

    @staticmethod
    def vector_scale(a, X):
        """Scale both components of X by a, in place, and return X."""
        for component in X:
            component.scale(a)
        return X

    @staticmethod
    def vector_axpy(a, X, Y):
        """Accumulate a * X into Y componentwise, in place, and return Y."""
        for y_comp, x_comp in zip(Y, X):
            y_comp.axpy(a, x_comp)
        return Y

    @staticmethod
    def vector_copy(X):
        """Return an independent (alpha, beta) copy of X."""
        return [component.clone() for component in X]

    @staticmethod
    def vector_transpose(X):
        """Return the componentwise transpose of X as a new pair."""
        return [component.transpose() for component in X]
class ProductCache:
    """Caches matrix-vector products between iterations of an iterative solver.

    Products are stored per label (e.g. "H1"/"H2" for RPA, "A" for TDA) so
    products already computed for previous trial vectors need not be rebuilt.
    """

    def __init__(self, *product_types):
        """Create a new product cache.

        Parameters
        ----------
        *product_types : str
            The product labels this cache will manage.
        """
        self._products = {p: [] for p in product_types}

    def add(self, pkey, new_elements):
        """Append new products under a label and return everything cached there.

        Parameters
        ----------
        pkey : str
            Product label; must be one of the labels given to the constructor.
        new_elements : list
            New products to append under ``pkey``.

        Returns
        -------
        list
            A shallow copy of every product cached under ``pkey``.

        Raises
        ------
        AttributeError
            If ``pkey`` is not a known product label.
        """
        if pkey not in self._products:
            raise AttributeError("No such product {}".format(pkey))
        self._products[pkey].extend(new_elements)
        return self._products[pkey].copy()

    def reset(self):
        """Reset the ProductCache by clearing all cached data (labels are kept)."""
        for cached in self._products.values():
            cached.clear()

    def count(self):
        """Return the number of products cached per label.

        Returns
        -------
        int
            The common length of all per-label product lists (0 when empty,
            including the degenerate case of a cache with no labels, which
            previously raised IndexError).

        Raises
        ------
        ValueError
            If the labels hold differing numbers of products.
        """
        lens = {len(cached) for cached in self._products.values()}
        if len(lens) > 1:
            raise ValueError("Cache lengths are not the same, invalid cache error. Call a developer.")
        return lens.pop() if lens else 0
class TDRSCFEngine(SingleMatPerVector):
    """Engine for R(HF/KS) products

    Fulfills the API required by :class:`~psi4.driver.p4util.solvers.SolverEngine`

    Parameters
    ----------
    wfn : :py:class:`psi4.core.Wavefunction`
        The converged SCF wfn
    ptype : {'rpa', 'tda'}
        The product type to be evaluated (case-insensitive). When ``ptype == 'rpa'`` the return of
        `compute_products` will be as expected by :func:`~psi4.driver.p4util.solvers.hamiltonian_solver`,
        when ``ptype == 'tda'`` the return of compute_products will be as expected by
        :func:`~psi4.driver.p4util.solvers.davidson_solver`.
    triplet : bool , optional
        Are products spin-adapted for triplet excitations?
    """

    def __init__(self, wfn, *, ptype, triplet=False):
        # primary data
        self.wfn = wfn
        self.ptype = ptype.lower()
        # Exchange-like (K) products are only produced for hybrid / range-corrected functionals
        self.needs_K_like = self.wfn.functional().is_x_hybrid() or self.wfn.functional().is_x_lrc()
        if self.ptype not in ["rpa", "tda"]:
            raise KeyError(f"Product type {self.ptype} not understood")
        # product type
        self.singlet = not triplet
        # Fix: branch on the normalized self.ptype. The original compared the
        # raw `ptype`, so e.g. ptype='RPA' passed validation above but built
        # the TDA ("A") cache, crashing compute_products later.
        if self.ptype == 'rpa':
            self.product_cache = ProductCache("H1", "H2")
        else:
            self.product_cache = ProductCache("A")
        # orbitals and eigenvalues
        self.Co = self.wfn.Ca_subset("SO", "OCC")
        self.Cv = self.wfn.Ca_subset("SO", "VIR")
        self.E_occ = self.wfn.epsilon_a_subset("SO", "OCC")
        self.E_vir = self.wfn.epsilon_a_subset("SO", "VIR")
        # orbital-energy-difference preconditioner, (re)built per state symmetry
        self.prec = None
        # ground state symmetry (closed-shell reference: totally symmetric)
        self.G_gs = 0
        # ground state spin multiplicity
        self.mult_gs = wfn.molecule().multiplicity()
        # excited state symmetry
        self.G_es = None
        # dimensions per irrep (occupied / virtual / SO)
        self.occpi = self.wfn.nalphapi()
        self.virpi = self.wfn.nmopi() - self.occpi
        self.nsopi = self.wfn.nsopi()
        self.reset_for_state_symm(0)

    ## API required by "engine" (see p4util.solvers.davidson/hamiltonian_solver)

    def new_vector(self, name=""):
        """Obtain a blank matrix object with the correct symmetry"""
        return core.Matrix(name, self.occpi, self.virpi, self.G_trans)

    def reset_for_state_symm(self, symmetry):
        """Reset internal quantities so the object is prepared to deal with transition to state with symmetry given
        """
        self.G_es = symmetry
        self._build_prec()
        self.product_cache.reset()

    def compute_products(self, vectors):
        """Given a set of vectors X Compute products

        if ptype == rpa:
           Returns pair (A+B)X, (A-B)X
        if ptype == tda:
           Returns AX
        """
        # Only compute products for vectors the cache has not seen yet; if the
        # solver collapsed its subspace (fewer vectors than cached), start over.
        n_old = self.product_cache.count()
        n_new = len(vectors)
        if n_new <= n_old:
            self.product_cache.reset()
            compute_vectors = vectors
        else:
            compute_vectors = vectors[n_old:]
        n_prod = len(compute_vectors)

        # Build base one and two electron quantities
        Fx = self.wfn.onel_Hx(compute_vectors)
        twoel = self.wfn.twoel_Hx(compute_vectors, False, "SO")
        Jx, Kx = self._split_twoel(twoel)

        # Switch between rpa and tda
        if self.ptype == 'rpa':
            H1X_new, H2X_new = self._combine_H1_H2(Fx, Jx, Kx)
            # flip overall sign to match the solver's convention
            for H1x in H1X_new:
                self.vector_scale(-1.0, H1x)
            for H2x in H2X_new:
                self.vector_scale(-1.0, H2x)
            H1X_all = self.product_cache.add("H1", H1X_new)
            H2X_all = self.product_cache.add("H2", H2X_new)
            return H1X_all, H2X_all, n_prod
        else:
            AX_new = self._combine_A(Fx, Jx, Kx)
            # flip overall sign to match the solver's convention
            for Ax in AX_new:
                self.vector_scale(-1.0, Ax)
            AX_all = self.product_cache.add("A", AX_new)
            return AX_all, n_prod

    def precondition(self, Rvec, shift):
        """Applies the preconditioner with a shift to a residual vector

        value = R / (shift - preconditioner)
        """
        for h in range(self.wfn.nirrep()):
            den = shift - self.prec.nph[h]
            # guard against division by (near-)zero denominators
            den[np.abs(den) < 0.0001] = 1.0
            Rvec.nph[h][:] /= den
        return Rvec

    def generate_guess(self, nguess):
        """Generate a set of guess vectors based on orbital energy differences
        """
        deltas = []
        guess_vectors = []
        for ho in range(self.wfn.nirrep()):
            # virtual irrep paired with this occupied irrep by the transition symmetry
            hv = ho ^ self.G_trans
            for i, ei in enumerate(self.E_occ.nph[ho]):
                for a, ea in enumerate(self.E_vir.nph[hv]):
                    deltas.append((ea - ei, i, a, ho))
        # unit vectors on the nguess smallest orbital-energy gaps
        deltas_sorted = sorted(deltas, key=lambda x: x[0])
        nguess = min(nguess, len(deltas_sorted))
        for i in range(nguess):
            v = self.new_vector()
            oidx = deltas_sorted[i][1]
            vidx = deltas_sorted[i][2]
            h = deltas_sorted[i][3]
            v.set(h, oidx, vidx, 1.0)
            guess_vectors.append(v)
        return guess_vectors

    def residue(self, X, so_prop_ints):
        """Contract a transition vector X with SO-basis property integrals."""
        # return zeros if spin multiplicity of GS and ES differ
        if not self.singlet and (self.mult_gs == 1):
            return np.zeros(len(so_prop_ints))
        prop = [core.triplet(self.Co, x, self.Cv, True, False, False) for x in so_prop_ints]
        # sqrt(2) from the spin-adapted (alpha+beta) transition density
        return np.sqrt(2.0) * np.array([X.vector_dot(u) for u in prop])

    ## Helper functions

    def _combine_H1_H2(self, Fx, Jx, Kx=None):
        """Build the combinations:
        Singlet:
           H1 X = [(Ea - Ei) + 4J - K - K^T]X
           H2 X = [(Ea - Ei) - K + K^T]X
        Triplet:
           H1 X = [(Ea - Ei) - K - K^T]X
           H2 X = [(Ea - Ei) - K + K^T]X
        """
        H1X = []
        H2X = []
        if Kx is not None:
            for Fxi, Jxi, Kxi in zip(Fx, Jx, Kx):
                Kxit = self.vector_transpose(Kxi)
                # H1x = -K singlet/triplet
                H1X_so = self.vector_copy(Kxi)
                H1X_so = self.vector_scale(-1.0, H1X_so)
                # H1X -= K^T singlet/triplet
                H1X_so = self.vector_axpy(-1.0, Kxit, H1X_so)
                # H2x = K^T - K singlet/triplet
                H2X_so = self.vector_axpy(-1.0, Kxi, Kxit)
                if self.singlet:
                    # H1x += 4*J (singlet only)
                    H1X_so = self.vector_axpy(4.0, Jxi, H1X_so)
                # transform + add Ea-Ei
                H1X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H1X_so)))
                H2X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H2X_so)))
        else:
            # pure functionals: no exchange-like terms at all
            for Fxi, Jxi in zip(Fx, Jx):
                if self.singlet:
                    H1X_so = self.vector_scale(4.0, Jxi)
                    H1X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H1X_so)))
                else:
                    H1X.append(self.vector_copy(Fxi))
                H2X.append(self.vector_copy(Fxi))
        return H1X, H2X

    def _combine_A(self, Fx, Jx, Kx=None):
        """Build the combinations
        Singlet:
            A X = [(Ea - Ei) + 2 J - K] X
        Triplet:
            A X = [(Ea - Ei) - K] X
        """
        Ax = []
        if Kx is not None:
            for Fxi, Jxi, Kxi in zip(Fx, Jx, Kx):
                Ax_so = self.vector_scale(-1.0, self.vector_copy(Kxi))
                if self.singlet:
                    Ax_so = self.vector_axpy(2.0, Jxi, Ax_so)
                Ax.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(Ax_so)))
        else:
            for Fxi, Jxi in zip(Fx, Jx):
                if self.singlet:
                    Ax.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(self.vector_scale(2.0, Jxi))))
                else:
                    Ax.append(self.vector_copy(Fxi))
        return Ax

    def _so_to_mo(self, X):
        """Transform (C_occ)^T X C_vir"""
        return core.triplet(self.Co, X, self.Cv, True, False, False)

    def _split_twoel(self, twoel):
        """Unpack J and K matrices
        """
        # with K-like terms, twoel_Hx returns alternating [J0, K0, J1, K1, ...]
        if self.needs_K_like:
            Jx = twoel[0::2]
            Kx = twoel[1::2]
        else:
            Jx = twoel
            Kx = None
        return Jx, Kx

    @property
    def G_trans(self):
        """The symmetry of the transition vector"""
        return self.G_gs ^ self.G_es

    def _build_prec(self):
        """Builds energy denominator
        """
        self.prec = self.new_vector()
        for h in range(self.wfn.nirrep()):
            # (Ea - Ei) for each occ/vir pair allowed by the transition symmetry
            self.prec.nph[h][:] = self.E_vir.nph[h ^ self.G_trans] - self.E_occ.nph[h].reshape(-1, 1)
class TDUSCFEngine(PairedMatPerVector):
    """Engine for U(HF/KS) products

    Fulfills the API required by :class:`~psi4.driver.p4util.solvers.SolverEngine`

    Parameters
    ----------
    wfn : :py:class:`psi4.core.Wavefunction`
        The converged SCF wfn
    ptype : str {'rpa', 'tda'}
        The product type to be evaluated. When ``ptype == 'rpa'``. The return of `compute_products` will be as
        expected by :func:`~psi4.driver.p4util.solvers.hamiltonian_solver`, when ``ptype == 'tda'`` the return of
        compute_products will be as expected by :func:`~psi4.driver.p4util.solvers.davidson_solver`.
    """

    def __init__(self, wfn, *, ptype):
        # Primary data
        self.wfn = wfn
        # NOTE(review): unlike TDRSCFEngine, `ptype` is neither lower-cased
        # nor validated here; anything other than exactly 'rpa' falls through
        # to the TDA branch below — confirm callers always pass lowercase.
        self.ptype = ptype
        # Find product type
        if ptype == 'rpa':
            self.product_cache = ProductCache("H1", "H2")
        else:
            self.product_cache = ProductCache("A")
        # Save orbitals and eigenvalues as [alpha, beta] pairs
        self.Co = [wfn.Ca_subset("SO", "OCC"), wfn.Cb_subset("SO", "OCC")]
        self.Cv = [wfn.Ca_subset("SO", "VIR"), wfn.Cb_subset("SO", "VIR")]
        self.E_occ = [wfn.epsilon_a_subset("SO", "OCC"), wfn.epsilon_b_subset("SO", "OCC")]
        self.E_vir = [wfn.epsilon_a_subset("SO", "VIR"), wfn.epsilon_b_subset("SO", "VIR")]
        # Exchange-like (K) products only arise for hybrid / range-corrected functionals
        self.needs_K_like = self.wfn.functional().is_x_hybrid() or self.wfn.functional().is_x_lrc()
        # dimensions ([alpha, beta] occupied / virtual per irrep)
        self.occpi = [self.wfn.nalphapi(), self.wfn.nbetapi()]
        self.virpi = [self.wfn.nmopi() - self.occpi[0], self.wfn.nmopi() - self.occpi[1]]
        self.nsopi = self.wfn.nsopi()
        # Orbital energy differences (preconditioner), built per state symmetry
        self.prec = [None, None]
        # Ground state symmetry: direct product (XOR) of the irreps of the
        # unpaired (alpha-excess) electrons
        self.G_gs = 0
        for h in range(self.wfn.nirrep()):
            for i in range(self.occpi[0][h] - self.occpi[1][h]):
                self.G_gs = self.G_gs ^ h
        # Excited state symmetry
        self.G_es = None
        self.reset_for_state_symm(0)

    ## API Required by "engine" (see p4util.solvers.davidson/hamiltonian_solver)

    def precondition(self, Rvec, shift):
        """Applies the preconditioner with a shift to a residual vector

        value = R / (shift - preconditioner)
        """
        for h in range(self.wfn.nirrep()):
            # alpha component; guard against (near-)zero denominators
            den = shift - self.prec[0].nph[h]
            den[abs(den) < 0.0001] = 1.0
            Rvec[0].nph[h][:] /= den
            # beta component
            den = shift - self.prec[1].nph[h]
            den[abs(den) < 0.0001] = 1.0
            Rvec[1].nph[h][:] /= den
        return Rvec

    def compute_products(self, vectors):
        """Compute Products for a list of guess vectors (X).

        if ptype == 'rpa':
            H1 , H2
            returns pair (A+B)X, (A-B)X products
        if ptype == 'tda':
            returns Ax products.
        """
        # Only compute products for vectors the cache has not seen yet; if the
        # solver collapsed its subspace (fewer vectors than cached), start over.
        n_old = self.product_cache.count()
        n_new = len(vectors)
        if n_new <= n_old:
            self.product_cache.reset()
            compute_vectors = vectors
        else:
            compute_vectors = vectors[n_old:]
        n_prod = len(compute_vectors)

        # flatten list of [(A,B)_i, ...] to [A_i, B_i, ...]
        vec_flat = sum(compute_vectors, [])
        Fx = self._pair_onel(self.wfn.onel_Hx(vec_flat))
        twoel = self.wfn.twoel_Hx(vec_flat, False, "SO")
        Jx, Kx = self._split_twoel(twoel)
        if self.ptype == "rpa":
            H1X_new, H2X_new = self._combine_H1_H2(Fx, Jx, Kx)
            # flip overall sign to match the solver's convention
            for H1x in H1X_new:
                self.vector_scale(-1.0, H1x)
            for H2x in H2X_new:
                self.vector_scale(-1.0, H2x)
            H1X_all = self.product_cache.add("H1", H1X_new)
            H2X_all = self.product_cache.add("H2", H2X_new)
            return H1X_all, H2X_all, n_prod
        else:
            AX_new = self._combine_A(Fx, Jx, Kx)
            # flip overall sign to match the solver's convention
            for Ax in AX_new:
                self.vector_scale(-1.0, Ax)
            AX_all = self.product_cache.add("A", AX_new)
            return AX_all, n_prod

    def generate_guess(self, nguess):
        """Generate a set of guess vectors based on orbital energy differences
        """
        guess_vectors = []
        deltas = []
        for ho in range(self.wfn.nirrep()):
            # virtual irrep paired with this occupied irrep by the transition symmetry
            hv = self.G_trans ^ ho
            # alpha occ -> alpha vir gaps (spin index 0)
            for i, ei in enumerate(self.E_occ[0].nph[ho]):
                for a, ea in enumerate(self.E_vir[0].nph[hv]):
                    deltas.append((ea - ei, 0, i, a, ho))
            # beta occ -> beta vir gaps (spin index 1)
            for i, ei in enumerate(self.E_occ[1].nph[ho]):
                for a, ea in enumerate(self.E_vir[1].nph[hv]):
                    deltas.append((ea - ei, 1, i, a, ho))
        # unit vectors on the nguess smallest orbital-energy gaps
        deltas_sorted = sorted(deltas, key=lambda x: x[0])
        nguess = min(nguess, len(deltas_sorted))
        for i in range(nguess):
            v = self.new_vector()
            spin = deltas_sorted[i][1]
            oidx = deltas_sorted[i][2]
            vidx = deltas_sorted[i][3]
            h = deltas_sorted[i][4]
            v[spin].set(h, oidx, vidx, 1.0)
            guess_vectors.append(v)
        return guess_vectors

    def new_vector(self, name=""):
        """Build a new object with shape symmetry like a trial vector """
        return [
            core.Matrix(name + 'a', self.occpi[0], self.virpi[0], self.G_trans),
            core.Matrix(name + 'b', self.occpi[1], self.virpi[1], self.G_trans)
        ]

    def reset_for_state_symm(self, symmetry):
        """Reset internal quantities so the object is prepared to deal with transition to state with symmetry given
        """
        self.G_es = symmetry
        self._build_prec()
        self.product_cache.reset()

    def residue(self, X, so_prop_ints):
        """Contract a transition vector X with SO-basis property integrals,
        summing the alpha and beta contributions (no spin-adaptation factor,
        unlike the restricted engine)."""
        prop_a = [core.triplet(self.Co[0], x, self.Cv[0], True, False, False) for x in so_prop_ints]
        prop_b = [core.triplet(self.Co[1], x, self.Cv[1], True, False, False) for x in so_prop_ints]
        return np.array([X[0].vector_dot(u[0]) + X[1].vector_dot(u[1]) for u in zip(prop_a, prop_b)])

    ## Helper Functions

    def _combine_H1_H2(self, Fx, Jx, Kx=None):
        """Build the combinations:
           H1 X = [(Ea - Ei) + 2J - K - K^T]X
           H2 X = [(Ea - Ei) - K + K^T]X
        """
        H1X = []
        H2X = []
        if Kx is not None:
            for Fxi, Jxi, Kxi in zip(Fx, Jx, Kx):
                # H1 = 2J - K - K^T (in the SO basis)
                H1X_so = self.vector_scale(2.0, Jxi)
                Kxit = self.vector_transpose(Kxi)
                H1X_so = self.vector_axpy(-1.0, Kxi, H1X_so)
                H1X_so = self.vector_axpy(-1.0, Kxit, H1X_so)
                # transform to MO and add the (Ea - Ei) diagonal (Fxi)
                H1X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H1X_so)))
                # H2 = K^T - K
                H2X_so = self.vector_axpy(-1.0, Kxi, Kxit)
                H2X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H2X_so)))
        else:
            # pure functionals: no exchange-like terms, H2 reduces to (Ea - Ei)
            for Fxi, Jxi in zip(Fx, Jx):
                H1X_so = self.vector_scale(2.0, Jxi)
                H1X.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(H1X_so)))
                H2X.append(self.vector_copy(Fxi))
        return H1X, H2X

    def _combine_A(self, Fx, Jx, Kx):
        """Build the combination
        A X = [(Ea-Ei) + J - K] X
        """
        Ax = []
        if Kx is not None:
            for Fxi, Jxi, Kxi in zip(Fx, Jx, Kx):
                Ax_so = self.vector_axpy(-1.0, Kxi, Jxi)
                Ax.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(Ax_so)))
        else:
            for Fxi, Jxi in zip(Fx, Jx):
                Ax.append(self.vector_axpy(1.0, Fxi, self._so_to_mo(Jxi)))
        return Ax

    def _so_to_mo(self, X):
        """Transform (C_occ)^T X C_vir"""
        return [core.triplet(self.Co[i], X[i], self.Cv[i], True, False, False) for i in (0, 1)]

    def _pair_onel(self, onel):
        """Pair up A/B from onel_Hx return"""
        return list(zip(onel[0::2], onel[1::2]))

    def _split_twoel(self, twoel):
        """Unpack J and K matrices and pair alpha/beta
        """
        # with K-like terms the flat return alternates [Ja, Jb, Ka, Kb, ...];
        # otherwise just [Ja, Jb, ...]
        if self.needs_K_like:
            Jx = list(zip(twoel[0::4], twoel[1::4]))
            Kx = list(zip(twoel[2::4], twoel[3::4]))
        else:
            Jx = list(zip(twoel[0::2], twoel[1::2]))
            Kx = None
        return Jx, Kx

    @property
    def G_trans(self):
        """Symmetry of transition vector"""
        return self.G_gs ^ self.G_es

    def _build_prec(self):
        """Builds energy denominator
        """
        self.prec = self.new_vector()
        for h in range(self.wfn.nirrep()):
            # (Ea - Ei) for each spin and each symmetry-allowed occ/vir pair
            self.prec[0].nph[h][:] = self.E_vir[0].nph[h ^ self.G_trans] - self.E_occ[0].nph[h].reshape(-1, 1)
            self.prec[1].nph[h][:] = self.E_vir[1].nph[h ^ self.G_trans] - self.E_occ[1].nph[h].reshape(-1, 1)
| psi4/psi4 | psi4/driver/procrouting/response/scf_products.py | Python | lgpl-3.0 | 21,923 | [
"Psi4"
] | d06634059cddc309e2a976f03130fe6eeff93d7b708c364697f6636d9f09790b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.